%
% This file was created by the Typo3 extension
% sevenpack version 0.7.14
%
% --- Timezone: CEST
% Creation date: 2018-04-23
% Creation time: 11-37-30
% --- Number of references
% 147
%
@Book { 3708, title = {Abstrakte Haltung: Kurt Goldstein im Spannungsfeld von Neurologie, Psychologie und Philosophie}, year = {2000}, pages = {250}, publisher = {K{\"o}nigshausen und Neumann}, address = {W{\"u}rzburg, Germany}, series = {Epistemata: W{\"u}rzburger Wissenschaftliche Schriften}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {de}, ISBN = {3-8260-1775-7}, author = {Noppeney, U} }
@Article { RoheN2018, title = {Reliability-Weighted Integration of Audiovisual Signals Can Be Modulated by Top-down Attention}, journal = {eNeuro}, year = {2018}, month = {2}, volume = {5}, number = {1}, pages = {1-20}, abstract = {Behaviorally, it is well established that human observers integrate signals near-optimally weighted in proportion to their reliabilities as predicted by maximum likelihood estimation. Yet, despite abundant behavioral evidence, it is unclear how the human brain accomplishes this feat. In a spatial ventriloquist paradigm, participants were presented with auditory, visual, and audiovisual signals and reported the location of the auditory or the visual signal. Combining psychophysics, multivariate functional MRI (fMRI) decoding, and models of maximum likelihood estimation (MLE), we characterized the computational operations underlying audiovisual integration at distinct cortical levels. We estimated observers' behavioral weights by fitting psychometric functions to participants' localization responses. Likewise, we estimated the neural weights by fitting neurometric functions to spatial locations decoded from regional fMRI activation patterns. Our results demonstrate that low-level auditory and visual areas encode predominantly the spatial location of the signal component of a region's preferred auditory (or visual) modality. By contrast, intraparietal sulcus forms spatial representations by integrating auditory and visual signals weighted by their reliabilities. Critically, the neural and behavioral weights and the variance of the spatial representations depended not only on the sensory reliabilities as predicted by the MLE model but also on participants' modality-specific attention and report (i.e., visual vs. auditory). These results suggest that audiovisual integration is not exclusively determined by bottom-up sensory reliabilities. Instead, modality-specific attention and report can flexibly modulate how intraparietal sulcus integrates sensory signals into spatial representations to guide behavioral responses (e.g., localization and orienting).}, department = {Research Group Noppeney}, web_url = {http://www.eneuro.org/content/eneuro/early/2018/02/16/ENEURO.0315-17.2018.full.pdf}, DOI = {10.1523/ENEURO.0315-17.2018}, EPUB = {e0315}, author = {Rohe, T and Noppeney, U} }
@Article { LeitaoTLTN2017, title = {Transcranial magnetic stimulation of right inferior parietal cortex causally influences prefrontal activation for visual detection}, journal = {European Journal of Neuroscience}, year = {2017}, month = {12}, volume = {46}, number = {12}, pages = {2807–2816}, abstract = {For effective interactions with the environment the brain needs to form perceptual decisions based on noisy sensory evidence.
Accumulating evidence suggests that perceptual decisions are formed by widespread interactions amongst sensory areas representing the noisy sensory evidence and fronto-parietal areas integrating the evidence into a decision variable that is compared to a decisional threshold. This concurrent transcranial magnetic stimulation (TMS)-fMRI study applied 10 Hz bursts of four TMS (or Sham) pulses to the intraparietal sulcus (IPS) to investigate the causal influence of IPS on the neural systems involved in perceptual decision making. Participants had to detect visual signals at threshold intensity that were presented in their left lower visual field on 50\% of the trials. Critically, we adjusted the signal strength such that participants failed to detect the visual stimulus on approximately 30\% of the trials allowing us to categorize trials into hits, misses and correct rejections. Our results show that IPS- relative to Sham-TMS attenuated activation increases for misses relative to correct rejections in the left middle and superior frontal gyri. Critically, while IPS-TMS did not significantly affect participants’ performance accuracy, it affected how observers adjusted their response times after making an error. We therefore suggest that activation increases in superior frontal gyri for misses relative to correct responses may not be critical for signal detection performance, but rather reflect post-decisional processing such as metacognitive monitoring of choice accuracy or decisional confidence.}, department = {Department Scheffler}, department2 = {Research Group Noppeney}, web_url = {http://onlinelibrary.wiley.com/doi/10.1111/ejn.13743/epdf}, state = {accepted}, DOI = {10.1111/ejn.13743}, author = {Leit{\~a}o, J and Thielscher, A and Lee, HL and Tuennerhoff, J and Noppeney, U} } @Article { LeitaoTTN2017, title = {Comparing TMS perturbations to occipital and parietal cortices in concurrent TMS-fMRI studies: Methodological considerations}, journal = {PLoS ONE}, year = {2017}, month = {8}, volume = {12}, number = {8}, pages = {1-20}, abstract = {Neglect and hemianopia are two neuropsychological syndromes that are associated with reduced awareness for visual signals in patients’ contralesional hemifield. They offer the unique possibility to dissociate the contributions of retino-geniculate and retino-colliculo circuitries in visual perception. Yet, insights from patient fMRI studies are limited by heterogeneity in lesion location and extent, long-term functional reorganization and behavioural compensation after stroke. Transcranial magnetic stimulation (TMS) has therefore been proposed as a complementary method to investigate the effect of transient perturbations on functional brain organization. This concurrent TMS-fMRI study applied TMS perturbation to occipital and parietal cortices with the aim to ‘mimick’ neglect and hemianopia. 
Based on the challenges and interpretational limitations of our own study we aim to provide tutorial guidance on how future studies should compare TMS to primary sensory and association areas that are governed by distinct computational principles, neural dynamics and functional architecture.}, department = {Department Scheffler}, department2 = {Research Group Noppeney}, web_url = {http://journals.plos.org/plosone/article/file?id=10.1371/journal.pone.0181438\&type=printable}, DOI = {10.1371/journal.pone.0181438}, EPUB = {e0181438}, author = {Leit{\~a}o, J and Thielscher, A and Tuennerhoff, J and Noppeney, U} } @Article { RoheN2016, title = {Distinct Computational Principles Govern Multisensory Integration in Primary Sensory and Association Cortices}, journal = {Current Biology}, year = {2016}, month = {2}, volume = {26}, number = {4}, pages = {509–514}, abstract = {Human observers typically integrate sensory signals in a statistically optimal fashion into a coherent percept by weighting them in proportion to their reliabilities [1, 2, 3 and 4]. An emerging debate in neuroscience is to which extent multisensory integration emerges already in primary sensory areas or is deferred to higher-order association areas [5, 6, 7, 8 and 9]. This fMRI study used multivariate pattern decoding to characterize the computational principles that define how auditory and visual signals are integrated into spatial representations across the cortical hierarchy. Our results reveal small multisensory influences that were limited to a spatial window of integration in primary sensory areas. By contrast, parietal cortices integrated signals weighted by their sensory reliabilities and task relevance in line with behavioral performance and principles of statistical optimality. Intriguingly, audiovisual integration in parietal cortices was attenuated for large spatial disparities when signals were unlikely to originate from a common source. Our results demonstrate that multisensory interactions in primary and association cortices are governed by distinct computational principles. In primary visual cortices, spatial disparity controlled the influence of non-visual signals on the formation of spatial representations, whereas in parietal cortices, it determined the influence of task-irrelevant signals. Critically, only parietal cortices integrated signals weighted by their bottom-up reliabilities and top-down task relevance into multisensory spatial priority maps to guide spatial orienting.}, department = {Research Group Noppeney}, web_url = {http://www.sciencedirect.com/science/article/pii/S0960982215015870}, DOI = {10.1016/j.cub.2015.12.056}, author = {Rohe, T and Noppeney, U} } @Article { GauN2015, title = {How prior expectations shape multisensory perception}, journal = {NeuroImage}, year = {2016}, month = {1}, volume = {124}, number = {Part A}, pages = {876–886}, abstract = {The brain generates a representation of our environment by integrating signals from a common source, but segregating signals from different sources. This fMRI study investigated how the brain arbitrates between perceptual integration and segregation based on top-down congruency expectations and bottom-up stimulus-bound congruency cues. Participants were presented audiovisual movies of phonologically congruent, incongruent or McGurk-MacDonald syllables that can be integrated into an illusory percept (e.g. ''ti'' percept for visual «ki» with auditory /pi/). They reported the syllable they perceived. 
Critically, we manipulated participants' top-down congruency expectations by presenting McGurk-MacDonald stimuli embedded in blocks of congruent or incongruent syllables. Behaviourally, participants were more likely to fuse audiovisual signals into an illusory McGurk-MacDonald percept in congruent than incongruent contexts. At the neural level, the left inferior frontal sulcus (lIFS) showed increased activations for bottom-up incongruent relative to congruent inputs. Moreover, lIFS activations were increased for physically identical McGurk-MacDonald signals, when participants segregated signals and reported their auditory percept. Critically, this activation increase for perceptual segregation was amplified when participants expected audiovisually incongruent signals based on prior sensory experience. Collectively, our results demonstrate that the lIFS combines top-down prior (in)congruency expectations with bottom-up (in)congruency cues to arbitrate between multisensory integration and segregation.}, department = {Research Group Noppeney}, web_url = {http://www.sciencedirect.com/science/article/pii/S1053811915008605}, DOI = {10.1016/j.neuroimage.2015.09.045}, author = {Gau, R and Noppeney, U} } @Article { TuennerhoffN2015, title = {When sentences live up to your expectations}, journal = {NeuroImage}, year = {2016}, month = {1}, volume = {124}, number = {Part A}, pages = {641–653}, abstract = {Speech recognition is rapid, automatic and amazingly robust. How the brain is able to decode speech from noisy acoustic inputs is unknown. We show that the brain recognizes speech by integrating bottom-up acoustic signals with top-down predictions. Subjects listened to intelligible normal and unintelligible fine structure speech that lacked the predictability of the temporal envelope and did not enable access to higher linguistic representations. Their top-down predictions were manipulated using priming. Activation for unintelligible fine structure speech was confined to primary auditory cortices, but propagated into posterior middle temporal areas when fine structure speech was made intelligible by top-down predictions. By contrast, normal speech engaged posterior middle temporal areas irrespective of subjects’ predictions. Critically, when speech violated subjects’ expectations, activation increases in anterior temporal gyri/sulci signalled a prediction error and the need for new semantic integration. In line with predictive coding, our findings compellingly demonstrate that top-down predictions determine whether and how the brain translates bottom-up acoustic inputs into intelligible speech.}, department = {Department Logothetis}, department2 = {Research Group Noppeney}, web_url = {http://www.sciencedirect.com/science/article/pii/S1053811915008010}, DOI = {10.1016/j.neuroimage.2015.09.004}, author = {Tuennerhoff, J and Noppeney, U} } @Article { GianiBOKN2015, title = {Detecting tones in complex auditory scenes}, journal = {NeuroImage}, year = {2015}, month = {11}, volume = {122}, pages = {203–213}, abstract = {In everyday life, our auditory system is bombarded with many signals in complex auditory scenes. Limited processing capacities allow only a fraction of these signals to enter perceptual awareness. This magnetoencephalography (MEG) study used informational masking to identify the neural mechanisms that enable auditory awareness. 
On each trial, participants indicated whether they detected a pair of sequentially presented tones (i.e., the target) that were embedded within a multi-tone background. We analysed MEG activity for ‘hits’ and ‘misses’, separately for the first and second tones within a target pair. Comparing physically identical stimuli that were detected or missed provided insights into the neural processes underlying auditory awareness. While the first tone within a target elicited a stronger early P50m on hit trials, only the second tone evoked a negativity at 150 ms, which may index segregation of the tone pair from the multi-tone background. Notably, a later sustained deflection peaking around 300 and 500 ms (P300m) was the only component that was significantly amplified for both tones, when they were detected pointing towards its key role in perceptual awareness. Additional Dynamic Causal Modelling analyses indicated that the negativity at 150 ms underlying auditory stream segregation is mediated predominantly via changes in intrinsic connectivity within auditory cortices. By contrast, the later P300m response as a signature of perceptual awareness relies on interactions between parietal and auditory cortices. In conclusion, our results suggest that successful detection and hence auditory awareness of a two-tone pair within complex auditory scenes rely on recurrent processing between auditory and higher-order parietal cortices.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://www.sciencedirect.com/science/article/pii/S1053811915006084}, DOI = {10.1016/j.neuroimage.2015.07.001}, author = {Giani, AS and Belardinelli, P and Ortiz, E and Kleiner, M and Noppeney, U} } @Article { LeitaoTTN2015, title = {Concurrent TMS-fMRI Reveals Interactions between Dorsal and Ventral Attentional Systems}, journal = {Journal of Neuroscience}, year = {2015}, month = {8}, volume = {35}, number = {32}, pages = {11445-11457}, abstract = {Adaptive behavior relies on combining bottom-up sensory inputs with top-down control signals to guide responses in line with current goals and task demands. Over the past decade, accumulating evidence has suggested that the dorsal and ventral frontoparietal attentional systems are recruited interactively in this process. This fMRI study used concurrent transcranial magnetic stimulation (TMS) as a causal perturbation approach to investigate the interactions between dorsal and ventral attentional systems and sensory processing areas. In a sustained spatial attention paradigm, human participants detected weak visual targets that were presented in the lower-left visual field on 50\% of the trials. Further, we manipulated the presence/absence of task-irrelevant auditory signals. Critically, on each trial we applied 10 Hz bursts of four TMS (or Sham) pulses to the intraparietal sulcus (IPS). IPS-TMS relative to Sham-TMS increased activation in the parietal cortex regardless of sensory stimulation, confirming the neural effectiveness of TMS stimulation. Visual targets increased activations in the anterior insula, a component of the ventral attentional system responsible for salience detection. Conversely, they decreased activations in the ventral visual areas. Importantly, IPS-TMS abolished target-evoked activation increases in the right temporoparietal junction (TPJ) of the ventral attentional system, whereas it eliminated target-evoked activation decreases in the right fusiform. 
Our results demonstrate that IPS-TMS exerts profound directional causal influences not only on visual areas but also on the TPJ as a critical component of the ventral attentional system. They reveal a complex interplay between dorsal and ventral attentional systems during target detection under sustained spatial attention.}, department = {Department Scheffler}, department2 = {Research Group Noppeney}, web_url = {http://www.jneurosci.org/content/35/32/11445.full.pdf+html}, DOI = {10.1523/JNEUROSCI.0939-15.2015}, author = {Leit{\~a}o, J and Thielscher, A and T{\"u}nnerhoff, J and Noppeney, U} } @Article { RoheN2015_2, title = {Sensory reliability shapes perceptual inference via two mechanisms}, journal = {Journal of Vision}, year = {2015}, month = {4}, volume = {15}, number = {5}, pages = {1-16}, abstract = {To obtain a coherent percept of the environment, the brain should integrate sensory signals from common sources and segregate those from independent sources. Recent research has demonstrated that humans integrate audiovisual information during spatial localization consistent with Bayesian Causal Inference (CI). However, the decision strategies that human observers employ for implicit and explicit CI remain unclear. Further, despite the key role of sensory reliability in multisensory integration, Bayesian CI has never been evaluated across a wide range of sensory reliabilities. This psychophysics study presented participants with spatially congruent and discrepant audiovisual signals at four levels of visual reliability. Participants localized the auditory signals (implicit CI) and judged whether auditory and visual signals came from common or independent sources (explicit CI). Our results demonstrate that humans employ model averaging as a decision strategy for implicit CI; they report an auditory spatial estimate that averages the spatial estimates under the two causal structures weighted by their posterior probabilities. Likewise, they explicitly infer a common source during the common-source judgment when the posterior probability for a common source exceeds a fixed threshold of 0.5. Critically, sensory reliability shapes multisensory integration in Bayesian CI via two distinct mechanisms: First, higher sensory reliability sensitizes humans to spatial disparity and thereby sharpens their multisensory integration window. Second, sensory reliability determines the relative signal weights in multisensory integration under the assumption of a common source. In conclusion, our results demonstrate that Bayesian CI is fundamental for integrating signals of variable reliabilities.}, department = {Research Group Noppeney}, department2 = {Department B{\"u}lthoff}, web_url = {http://jov.arvojournals.org/article.aspx?articleid=2288871}, DOI = {10.1167/15.5.22}, author = {Rohe, T and Noppeney, U} } @Article { AllerGCWN2015, title = {A spatially collocated sound thrusts a flash into awareness}, journal = {Frontiers in Integrative Neuroscience}, year = {2015}, month = {2}, volume = {9}, number = {16}, pages = {1-8}, abstract = {To interact effectively with the environment the brain integrates signals from multiple senses. It is currently unclear to what extent spatial information can be integrated across different senses in the absence of awareness. 
Combining dynamic continuous flash suppression and spatial audiovisual stimulation, the current study investigated whether a sound facilitates a concurrent visual flash to elude flash suppression and enter perceptual awareness depending on audiovisual spatial congruency. Our results demonstrate that a concurrent sound boosts unaware visual signals into perceptual awareness. Critically, this process depended on the spatial congruency of the auditory and visual signals pointing towards low level mechanisms of audiovisual integration. Moreover, the concurrent sound biased the reported location of the flash as a function of flash visibility. The spatial bias of sounds on reported flash location was strongest for flashes that were judged invisible. Our results suggest that multisensory integration is a critical mechanism that enables signals to enter conscious perception.}, department = {Research Group Noppeney}, department2 = {Department Logothetis}, department3 = {Research Group Ernst}, department4 = {Department B{\"u}lthoff}, web_url = {http://journal.frontiersin.org/Article/10.3389/fnint.2015.00016/pdf}, DOI = {10.3389/fnint.2015.00016}, author = {Aller, M and Giani, A and Conrad, V and Watanabe, M and Noppeney, U} } @Article { RoheN2015, title = {Cortical Hierarchies Perform Bayesian Causal Inference in Multisensory Perception}, journal = {PLoS Biology}, year = {2015}, month = {2}, volume = {13}, number = {2}, pages = {1-18}, abstract = {To form a veridical percept of the environment, the brain needs to integrate sensory signals from a common source but segregate those from independent sources. Thus, perception inherently relies on solving the “causal inference problem.” Behaviorally, humans solve this problem optimally as predicted by Bayesian Causal Inference; yet, the underlying neural mechanisms are unexplored. Combining psychophysics, Bayesian modeling, functional magnetic resonance imaging (fMRI), and multivariate decoding in an audiovisual spatial localization task, we demonstrate that Bayesian Causal Inference is performed by a hierarchy of multisensory processes in the human brain. At the bottom of the hierarchy, in auditory and visual areas, location is represented on the basis that the two signals are generated by independent sources (= segregation). At the next stage, in posterior intraparietal sulcus, location is estimated under the assumption that the two signals are from a common source (= forced fusion). Only at the top of the hierarchy, in anterior intraparietal sulcus, the uncertainty about the causal structure of the world is taken into account and sensory signals are combined as predicted by Bayesian Causal Inference. Characterizing the computational operations of signal interactions reveals the hierarchical nature of multisensory perception in human neocortex. It unravels how the brain accomplishes Bayesian Causal Inference, a statistical computation fundamental for perception and cognition. 
Our results demonstrate how the brain combines information in the face of uncertainty about the underlying causal structure of the world.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://www.plosbiology.org/article/fetchObject.action?uri=info:doi/10.1371/journal.pbio.1002073\&representation=PDF}, DOI = {10.1371/journal.pbio.1002073}, EPUB = {e1002073}, author = {Rohe, T and Noppeney, U} } @Article { AdamN2014, title = {A phonologically congruent sound boosts a visual target into perceptual awareness}, journal = {Frontiers in Integrative Neuroscience}, year = {2014}, month = {9}, volume = {8}, number = {70}, pages = {1-13}, abstract = {Capacity limitations of attentional resources allow only a fraction of sensory inputs to enter our awareness. Most prominently, in the attentional blink the observer often fails to detect the second of two rapidly successive targets that are presented in a sequence of distractor items. To investigate how auditory inputs enable a visual target to escape the attentional blink, this study presented the visual letter targets T1 and T2 together with phonologically congruent or incongruent spoken letter names. First, a congruent relative to an incongruent sound at T2 rendered visual T2 more visible. Second, this T2 congruency effect was amplified when the sound was congruent at T1 as indicated by a T1 congruency x T2 congruency interaction. Critically, these effects were observed both when the sounds were presented in synchrony with and prior to the visual target letters suggesting that the sounds may increase visual target identification via multiple mechanisms such as audiovisual priming or decisional interactions. Our results demonstrate that a sound around the time of T2 increases subjects’ awareness of the visual target as a function of T1 and T2 congruency. Consistent with Bayesian causal inference, the brain may thus combine (1) prior congruency expectations based on T1 congruency and (2) phonological congruency cues provided by the audiovisual inputs at T2 to infer whether auditory and visual signals emanate from a common source and should hence be integrated for perceptual decisions.}, department = {Research Group Noppeney}, web_url = {http://journal.frontiersin.org/Journal/10.3389/fnint.2014.00070/pdf}, DOI = {10.3389/fnint.2014.00070}, author = {Adam, R and Noppeney, U} } @Article { LeoN2014, title = {Conditioned Sounds Enhance Visual Processing}, journal = {PLoS ONE}, year = {2014}, month = {9}, volume = {9}, number = {9}, pages = {1-7}, abstract = {This psychophysics study investigated whether prior auditory conditioning influences how a sound interacts with visual perception. In the conditioning phase, subjects were presented with three pure tones ( = conditioned stimuli, CS) that were paired with positive, negative or neutral unconditioned stimuli. As unconditioned reinforcers we employed pictures (highly pleasant, unpleasant and neutral) or monetary outcomes (+50 euro cents, −50 cents, 0 cents). In the subsequent visual selective attention paradigm, subjects were presented with near-threshold Gabors displayed in their left or right hemifield. Critically, the Gabors were presented in synchrony with one of the conditioned sounds. Subjects discriminated whether the Gabors were presented in their left or right hemifields. Participants determined the location more accurately when the Gabors were presented in synchrony with positive relative to neutral sounds irrespective of reinforcer type. 
Thus, previously rewarded relative to neutral sounds increased the bottom-up salience of the visual Gabors. Our results are the first demonstration that prior auditory conditioning is a potent mechanism to modulate the effect of sounds on visual perception.}, department = {Research Group Noppeney}, web_url = {http://www.plosone.org/article/fetchObject.action?uri=info\%3Adoi\%2F10.1371\%2Fjournal.pone.0106860\&representation=PDF}, DOI = {10.1371/journal.pone.0106860}, EPUB = {e106860}, author = {Leo, F and Noppeney, U} } @Article { LeeN2014_2, title = {Music expertise shapes audiovisual temporal integration windows for speech, sinewave speech, and music}, journal = {Frontiers in Psychology}, year = {2014}, month = {8}, volume = {5}, number = {868}, pages = {1-9}, abstract = {This psychophysics study used musicians as a model to investigate whether musical expertise shapes the temporal integration window for audiovisual speech, sinewave speech, or music. Musicians and non-musicians judged the audiovisual synchrony of speech, sinewave analogs of speech, and music stimuli at 13 audiovisual stimulus onset asynchronies (±360, ±300, ±240, ±180, ±120, ±60, and 0 ms). Further, we manipulated the duration of the stimuli by presenting sentences/melodies or syllables/tones. Critically, musicians relative to non-musicians exhibited significantly narrower temporal integration windows for both music and sinewave speech. Further, the temporal integration window for music decreased with the amount of music practice, but not with age of acquisition. In other words, the more musicians practiced piano in the past 3 years, the more sensitive they became to the temporal misalignment of visual and auditory signals. Collectively, our findings demonstrate that music practicing fine-tunes the audiovisual temporal integration window to various extents depending on the stimulus class. While the effect of piano practicing was most pronounced for music, it also generalized to other stimulus classes such as sinewave speech and to a marginally significant degree to natural speech.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://journal.frontiersin.org/Journal/10.3389/fpsyg.2014.00868/pdf}, DOI = {10.3389/fpsyg.2014.00868}, author = {Lee, H and Noppeney, U} } @Article { LeeN2014, title = {Temporal prediction errors in visual and auditory cortices}, journal = {Current Biology}, year = {2014}, month = {4}, volume = {24}, number = {8}, pages = {R309–R310}, abstract = {To form a coherent percept of the environment, the brain needs to bind sensory signals emanating from a common source, but to segregate those from different sources [1]. Temporal correlations and synchrony act as prominent cues for multisensory integration [2, 3 and 4], but the neural mechanisms by which such cues are identified remain unclear. Predictive coding suggests that the brain iteratively optimizes an internal model of its environment by minimizing the errors between its predictions and the sensory inputs [5 and 6]. This model enables the brain to predict the temporal evolution of natural audiovisual inputs and their statistical (for example, temporal) relationship. A prediction of this theory is that asynchronous audiovisual signals violating the model’s predictions induce an error signal that depends on the directionality of the audiovisual asynchrony.
As the visual system generates the dominant temporal predictions for visual leading asynchrony, the delayed auditory inputs are expected to generate a prediction error signal in the auditory system (and vice versa for auditory leading asynchrony). Using functional magnetic resonance imaging (fMRI), we measured participants’ brain responses to synchronous, visual leading and auditory leading movies of speech, sinewave speech or music. In line with predictive coding, auditory leading asynchrony elicited a prediction error in visual cortices and visual leading asynchrony in auditory cortices. Our results reveal predictive coding as a generic mechanism to temporally bind signals from multiple senses into a coherent percept.}, department = {Research Group Noppeney}, web_url = {http://www.sciencedirect.com/science/article/pii/S0960982214001456}, DOI = {10.1016/j.cub.2014.02.007}, author = {Lee, H and Noppeney, U} } @Article { ConradKBHBN2013, title = {Naturalistic Stimulus Structure Determines the Integration of Audiovisual Looming Signals in Binocular Rivalry}, journal = {PLoS ONE}, year = {2013}, month = {8}, volume = {8}, number = {8}, pages = {1-8}, abstract = {Rapid integration of biologically relevant information is crucial for the survival of an organism. Most prominently, humans should be biased to attend and respond to looming stimuli that signal approaching danger (e.g. predator) and hence require rapid action. This psychophysics study used binocular rivalry to investigate the perceptual advantage of looming (relative to receding) visual signals (i.e. looming bias) and how this bias can be influenced by concurrent auditory looming/receding stimuli and the statistical structure of the auditory and visual signals. Subjects were dichoptically presented with looming/receding visual stimuli that were paired with looming or receding sounds. The visual signals conformed to two different statistical structures: (1) a ‘simple’ random-dot kinematogram showing a starfield and (2) a “naturalistic” visual Shepard stimulus. Likewise, the looming/receding sound was (1) a simple amplitude- and frequency-modulated (AM-FM) tone or (2) a complex Shepard tone. Our results show that the perceptual looming bias (i.e. the increase in dominance times for looming versus receding percepts) is amplified by looming sounds, yet reduced and even converted into a receding bias by receding sounds. Moreover, the influence of looming/receding sounds on the visual looming bias depends on the statistical structure of both the visual and auditory signals. It is enhanced when audiovisual signals are Shepard stimuli. In conclusion, visual perception prioritizes processing of biologically significant looming stimuli especially when paired with looming auditory signals. 
Critically, these audiovisual interactions are amplified for statistically complex signals that are more naturalistic and known to engage neural processing at multiple levels of the cortical hierarchy.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://www.plosone.org/article/fetchObject.action;jsessionid=F70CA4F95C70436E9FD8AAB37C131527?uri=info\%3Adoi\%2F10.1371\%2Fjournal.pone.0070710\&representation=PDF}, DOI = {10.1371/journal.pone.0070710}, EPUB = {e70710}, author = {Conrad, V and Kleiner, M and Bartels, A and Hartcher O'Brien, J and B{\"u}lthoff, HH and Noppeney, U} } @Article { vonSaldernN2013, title = {Sensory and Striatal Areas Integrate Auditory and Visual Signals into Behavioral Benefits during Motion Discrimination}, journal = {Journal of Neuroscience}, year = {2013}, month = {5}, volume = {33}, number = {20}, pages = {8841-8849}, abstract = {For effective interactions with our dynamic environment, it is critical for the brain to integrate motion information from the visual and auditory senses. Combining fMRI and psychophysics, this study investigated how the human brain integrates auditory and visual motion into benefits in motion discrimination. Subjects discriminated the motion direction of audiovisual stimuli that contained directional motion signal in the auditory, visual, audiovisual, or no modality at two levels of signal reliability. Therefore, this 2 \(\times\) 2 \(\times\) 2 factorial design manipulated: (1) auditory motion information (signal vs noise), (2) visual motion information (signal vs noise), and (3) reliability of motion signal (intact vs degraded). Behaviorally, subjects benefited significantly from audiovisual integration primarily for degraded auditory and visual motion signals while obtaining near ceiling performance for “unisensory” signals when these were reliable and intact. At the neural level, we show audiovisual motion integration bilaterally in the visual motion areas hMT+/V5+ and implicate the posterior superior temporal gyrus/planum temporale in auditory motion processing. Moreover, we show that the putamen integrates audiovisual signals into more accurate motion discrimination responses. Our results suggest audiovisual integration processes at both the sensory and response selection levels. In all of these regions, the operational profile of audiovisual integration followed the principle of inverse effectiveness, in which audiovisual response suppression for intact stimuli turns into response enhancements for degraded stimuli. This response profile parallels behavioral indices of audiovisual integration, in which subjects benefit significantly from audiovisual integration only for the degraded conditions.}, department = {Research Group Noppeney}, web_url = {http://www.jneurosci.org/content/33/20/8841.full.pdf+html}, DOI = {10.1523/​JNEUROSCI.3020-12.2013}, author = {von Saldern, S and Noppeney, U} } @Article { LeitaoTWPN2012, title = {Effects of Parietal TMS on Visual and Auditory Processing at the Primary Cortical Level: A Concurrent TMS-fMRI Study}, journal = {Cerebral Cortex}, year = {2013}, month = {4}, volume = {23}, number = {4}, pages = {873-884}, abstract = {Accumulating evidence suggests that multisensory interactions emerge already at the primary cortical level. Specifically, auditory inputs were shown to suppress activations in visual cortices when presented alone but amplify the blood oxygen level–dependent (BOLD) responses to concurrent visual inputs (and vice versa). 
This concurrent transcranial magnetic stimulation–functional magnetic resonance imaging (TMS-fMRI) study applied repetitive TMS trains at no, low, and high intensity over right intraparietal sulcus (IPS) and vertex to investigate top-down influences on visual and auditory cortices under 3 sensory contexts: visual, auditory, and no stimulation. IPS-TMS increased activations in auditory cortices irrespective of sensory context as a result of direct and nonspecific auditory TMS side effects. In contrast, IPS-TMS modulated activations in the visual cortex in a state-dependent fashion: it deactivated the visual cortex under no and auditory stimulation but amplified the BOLD response to visual stimulation. However, only the response amplification to visual stimulation was selective for IPS-TMS, while the deactivations observed for IPS- and Vertex-TMS resulted from crossmodal deactivations induced by auditory activity to TMS sounds. TMS to IPS may increase the responses in visual (or auditory) cortices to visual (or auditory) stimulation via a gain control mechanism or crossmodal interactions. Collectively, our results demonstrate that understanding TMS effects on (uni)sensory processing requires a multisensory perspective.}, department = {Research Group Noppeney}, department2 = {Department Scheffler}, web_url = {http://cercor.oxfordjournals.org/content/23/4/873.full.pdf+html}, DOI = {10.1093/cercor/bhs078}, author = {Leit{\~a}o, J and Thielscher, A and Werner, S and Pohmann, R and Noppeney, U} } @Article { BelardinelliOBNP2012, title = {Source Reconstruction Accuracy of MEG and EEG Bayesian Inversion Approaches}, journal = {PLoS ONE}, year = {2012}, month = {12}, volume = {7}, number = {12}, pages = {1-16}, abstract = {Electro- and magnetoencephalography allow for non-invasive investigation of human brain activation and corresponding networks with high temporal resolution. Still, no correct network detection is possible without reliable source localization. In this paper, we examine four different source localization schemes under a common Variational Bayesian framework. A Bayesian approach to the Minimum Norm Model (MNM), an Empirical Bayesian Beamformer (EBB) and two iterative Bayesian schemes (Automatic Relevance Determination (ARD) and Greedy Search (GS)) are quantitatively compared. While EBB and MNM each use a single empirical prior, ARD and GS employ a library of anatomical priors that define possible source configurations. The localization performance was investigated as a function of (i) the number of sources (one vs. two vs. three), (ii) the signal to noise ratio (SNR; 5 levels) and (iii) the temporal correlation of source time courses (for the cases of two or three sources). We also tested whether the use of additional bilateral priors specifying source covariance for ARD and GS algorithms improved performance. Our results show that MNM proves effective only with single source configurations. EBB shows a spatial accuracy of few millimeters with high SNRs and low correlation between sources. In contrast, ARD and GS are more robust to noise and less affected by temporal correlations between sources. However, the spatial accuracy of ARD and GS is generally limited to the order of one centimeter. 
We found that the use of correlated covariance priors made no difference to ARD/GS performance.}, department = {Research Group Noppeney}, web_url = {http://journals.plos.org/plosone/article/file?id=10.1371/journal.pone.0051985\&type=printable}, DOI = {10.1371/journal.pone.0051985}, EPUB = {e51985}, author = {Belardinelli, P and Ortiz, E and Barnes, G and Noppeney, U and Preissl, H} } @Article { ConradVN2012, title = {Interactions Between Apparent Motion Rivalry in Vision and Touch}, journal = {Psychological Science}, year = {2012}, month = {8}, volume = {23}, number = {8}, pages = {940-948}, abstract = {In multistable perception, the brain alternates between several perceptual explanations of ambiguous sensory signals. It is unknown whether multistable processes can interact across the senses. In the study reported here, we presented subjects with unisensory (visual or tactile), spatially congruent visuotactile, and spatially incongruent visuotactile apparent motion quartets. Congruent stimulation induced pronounced visuotactile interactions, as indicated by increased dominance times for both vision and touch, and an increased percentage bias for the percept already dominant under unisensory stimulation. Thus, the joint evidence from vision and touch stabilizes the more likely perceptual interpretation and thereby decelerates the rivalry dynamics. Yet the temporal dynamics depended also on subjects’ attentional focus and was generally slower for tactile than for visual reports. Our results support Bayesian approaches to perceptual inference, in which the probability of a perceptual interpretation is determined by combining visual, tactile, or visuotactile evidence with modality-specific priors that depend on subjects’ attentional focus. Critically, the specificity of visuotactile interactions for spatially congruent stimulation indicates multisensory rather than cognitive-bias mechanisms.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Ernst}, department3 = {Research Group Noppeney}, web_url = {http://pss.sagepub.com/content/23/8/940.full.pdf+html}, DOI = {10.1177/0956797612438735}, author = {Conrad, V and Vitello, MP and Noppeney, U} } @Article { GianiOBKPN2012, title = {Steady-state responses in MEG demonstrate information integration within but not across the auditory and visual senses}, journal = {NeuroImage}, year = {2012}, month = {4}, volume = {60}, number = {2}, pages = {1478–1489}, abstract = {To form a unified percept of our environment, the human brain integrates information within and across the senses. This MEG study investigated interactions within and between sensory modalities using a frequency analysis of steady-state responses that are elicited time-locked to periodically modulated stimuli. Critically, in the frequency domain, interactions between sensory signals are indexed by crossmodulation terms (i.e. the sums and differences of the fundamental frequencies). The 3x2 factorial design, manipulated (1) modality: auditory, visual or audiovisual (2) steady-state modulation: the auditory and visual signals were modulated only in one sensory feature (e.g. visual gratings modulated in luminance at 6 Hz) or in two features (e.g. tones modulated in frequency at 40 Hz \& amplitude at 0.2 Hz). This design enabled us to investigate crossmodulation frequencies that are elicited when two stimulus features are modulated concurrently (i) in one sensory modality or (ii) in auditory and visual modalities. 
In support of within-modality integration, we reliably identified crossmodulation frequencies when two stimulus features in one sensory modality were modulated at different frequencies. In contrast, no crossmodulation frequencies were identified when information needed to be combined from auditory and visual modalities. The absence of audiovisual crossmodulation frequencies suggests that the previously reported audiovisual interactions in primary sensory areas may mediate low level spatiotemporal coincidence detection that is prominent for stimulus transients but less relevant for sustained SSR responses. In conclusion, our results indicate that information in SSRs is integrated over multiple time scales within but not across sensory modalities at the primary cortical level.}, department = {Research Group Noppeney}, department2 = {Department B{\"u}lthoff}, web_url = {http://www.sciencedirect.com/science/article/pii/S1053811912001322}, DOI = {10.1016/j.neuroimage.2012.01.114}, author = {Giani, AS and Ortiz, EB and Belardinelli, P and Kleiner, M and Preissl, H and Noppeney, U} } @Article { HelbigERPTMSN2011, title = {The neural mechanisms of reliability weighted integration of shape information from vision and touch}, journal = {NeuroImage}, year = {2012}, month = {4}, volume = {60}, number = {2}, pages = {1063–1072}, abstract = {Behaviourally, humans have been shown to integrate multisensory information in a statistically-optimal fashion by averaging the individual unisensory estimates according to their relative reliabilities. This form of integration is optimal in that it yields the most reliable (i.e. least variable) multisensory percept. The present study investigates the neural mechanisms underlying integration of visual and tactile shape information at the macroscopic scale of the regional BOLD response. Observers discriminated the shapes of ellipses that were presented bimodally (visual-tactile) or visually alone. A 2\(\times\)5 factorial design manipulated (i) the presence vs. absence of tactile shape information and (ii) the reliability of the visual shape information (five levels). We then investigated whether regional activations underlying tactile shape discrimination depended on the reliability of visual shape. Indeed, in primary somatosensory cortices (bilateral BA2) and the superior parietal lobe the responses to tactile shape input were increased when the reliability of visual shape information was reduced. Conversely, tactile inputs suppressed visual activations in the right posterior fusiform, when the visual signal was blurred and unreliable. 
Somatosensory and visual cortices may sustain integration of visual and tactile shape information either via direct connections from visual areas or top-down effects from higher order parietal areas.}, department = {Research Group Ernst}, department2 = {Department Scheffler}, department3 = {Department Sch{\"o}lkopf}, department4 = {Research Group Noppeney}, web_url = {http://www.sciencedirect.com/science/article/pii/S1053811911011475}, DOI = {10.1016/j.neuroimage.2011.09.072}, author = {Helbig, HB and Ernst, MO and Ricciardi, E and Pietrini, P and Thielscher, A and Mayer, KM and Schultz, J and Noppeney, U} } @Article { LeeN2011_2, title = {Long-term music training tunes how the brain temporally binds signals from multiple senses}, journal = {Proceedings of the National Academy of Sciences of the United States of America}, year = {2011}, month = {12}, volume = {108}, number = {51}, pages = {E1441-E1450}, abstract = {Practicing a musical instrument is a rich multisensory experience involving the integration of visual, auditory, and tactile inputs with motor responses. This combined psychophysics–fMRI study used the musician's brain to investigate how sensory-motor experience molds temporal binding of auditory and visual signals. Behaviorally, musicians exhibited a narrower temporal integration window than nonmusicians for music but not for speech. At the neural level, musicians showed increased audiovisual asynchrony responses and effective connectivity selectively for music in a superior temporal sulcus-premotor-cerebellar circuitry. Critically, the premotor asynchrony effects predicted musicians’ perceptual sensitivity to audiovisual asynchrony. Our results suggest that piano practicing fine tunes an internal forward model mapping from action plans of piano playing onto visible finger movements and sounds. This internal forward model furnishes more precise estimates of the relative audiovisual timings and hence, stronger prediction error signals specifically for asynchronous music in a premotor-cerebellar circuitry. Our findings show intimate links between action production and audiovisual temporal binding in perception.}, department = {Research Group Noppeney}, department2 = {Department B{\"u}lthoff}, web_url = {http://www.pnas.org/content/108/51/E1441.full.pdf+html}, DOI = {10.1073/pnas.1115267108}, author = {Lee, HL and Noppeney, U} } @Article { MeyerN2011, title = {Multisensory integration: From fundamental principles to translational research}, journal = {Experimental Brain Research}, year = {2011}, month = {9}, volume = {213}, number = {2-3}, pages = {163-166}, abstract = {We perceive our natural environment via multiple senses. How does our mind and brain integrate these diverse sensory inputs into a coherent and unified percept? This challenging and exciting question has been the focus of a growing multidisciplinary community of researchers that meet regularly at the annual meeting of the International Multisensory Research Forum (IMRF). The IMRF meeting brings together researchers that investigate multisensory integration at multiple levels of description ranging from neurophysiology to behaviour, with research interests from theory to applications. Traditionally, multisensory research has focused on the characterization of the fundamental principles and constraints that govern multisensory integration. Research has then moved forward to address questions of how multisensory integration emerges during development and may be perturbed in cases of disease or ageing. 
In the long-term, multisensory research will have direct impact in translational studies investigating the benefits of a multisensory environment for patients that are impaired when presented with information in one sensory modality alone. Obviously, this myriad of research topics can only be addressed by combining findings from a multitude of methods including psychophysics, neurophysiology and non-invasive structural and functional imaging in humans. Further, since its inception multisensory research has constantly gained impetus from computational models. Computational models contribute substantially to the progress made in multisensory research by providing a deeper understanding of the current empirical findings and conversely making predictions that guide future research. Most prominently, the normative Bayesian perspective continues to inspire inquiries into the optimality of multisensory integration across various species. This special issue on multisensory processing has resulted from the IMRF meeting held at Liverpool University in 2010. In accordance with previous procedures, the call for papers was not restricted to meeting attendees but was open to the entire multisensory community. As has been the tradition since the first special IMRF issue, we received a large number of high quality submissions leading to strong competition. Many excellent submissions had to be rejected or transferred to other special issues because of space limitations. Nevertheless, we hope that the collection of manuscripts included in this special issue will provide a rich source of reference for the wider multisensory community. Given the multidisciplinarity of the IMRF community, the submitted manuscripts cover a range of the topics that have briefly been highlighted above. For coarse reference, we have grouped the manuscripts into five sections.}, department = {Research Group Noppeney}, web_url = {http://www.springerlink.com/content/a15607v4313m6tv5/fulltext.pdf}, DOI = {10.1007/s00221-011-2803-z}, author = {Meyer, GF and Noppeney, U} } @Article { LeeN2011, title = {Physical and Perceptual Factors Shape the Neural Mechanisms That Integrate Audiovisual Signals in Speech Comprehension}, journal = {Journal of Neuroscience}, year = {2011}, month = {8}, volume = {31}, number = {31}, pages = {11338-11350}, abstract = {Face-to-face communication challenges the human brain to integrate information from auditory and visual senses with linguistic representations. Yet the role of bottom-up physical (spectrotemporal structure) input and top-down linguistic constraints in shaping the neural mechanisms specialized for integrating audiovisual speech signals are currently unknown. Participants were presented with speech and sinewave speech analogs in visual, auditory, and audiovisual modalities. Before the fMRI study, they were trained to perceive physically identical sinewave speech analogs as speech (SWS-S) or nonspeech (SWS-N). 
Comparing audiovisual integration (interactions) of speech, SWS-S, and SWS-N revealed a posterior–anterior processing gradient within the left superior temporal sulcus/gyrus (STS/STG): Bilateral posterior STS/STG integrated audiovisual inputs regardless of spectrotemporal structure or speech percept; in left mid-STS, the integration profile was primarily determined by the spectrotemporal structure of the signals; more anterior STS regions discarded spectrotemporal structure and integrated audiovisual signals constrained by stimulus intelligibility and the availability of linguistic representations. In addition to this “ventral” processing stream, a “dorsal” circuitry encompassing posterior STS/STG and left inferior frontal gyrus differentially integrated audiovisual speech and SWS signals. Indeed, dynamic causal modeling and Bayesian model comparison provided strong evidence for a parallel processing structure encompassing a ventral and a dorsal stream with speech intelligibility training enhancing the connectivity between posterior and anterior STS/STG. In conclusion, audiovisual speech comprehension emerges in an interactive process with the integration of auditory and visual signals being progressively constrained by stimulus intelligibility along the STS and spectrotemporal structure in a dorsal fronto-temporal circuitry.}, department = {Research Group Noppeney}, web_url = {http://www.jneurosci.org/content/31/31/11338.full.pdf+html}, DOI = {10.1523/​JNEUROSCI.6510-10.2011}, author = {Lee, HL and Noppeney, U} } @Article { 6741, title = {The Contributions of Transient and Sustained Response Codes to Audiovisual Integration}, journal = {Cerebral Cortex}, year = {2011}, month = {4}, volume = {21}, number = {4}, pages = {920-931}, abstract = {Multisensory events in our natural environment unfold at multiple temporal scales over extended periods of time. This functional magnetic resonance imaging study investigated whether the brain uses transient (onset, offset) or sustained temporal codes to effectively integrate incoming visual and auditory signals within the cortical hierarchy. Subjects were presented with 1) velocity-modulated radial motion, 2) amplitude-modulated sound, or 3) an in phase combination of both in blocks of variable durations to dissociate transient and sustained blood oxygen level–dependent responses. Audiovisual interactions emerged primarily for transient onset and offset responses highlighting the importance of rapid stimulus transitions for multisensory integration. Strikingly, audiovisual interactions for onset and offset transients were dissociable at the functional and anatomical level. Low-level sensory areas integrated audiovisual inputs at stimulus onset in a superadditive fashion to enhance stimulus salience. In contrast, higher order association areas showed subadditive integration profiles at stimulus offset possibly reflecting the formation of higher order representations. In conclusion, multisensory integration emerges at multiple levels of the cortical hierarchy using different temporal codes and integration profiles. 
From a methodological perspective, these results highlight the limitations of conventional event related or block designs that cannot characterize these rich dynamics of audiovisual integration.}, department = {Research Group Noppeney}, web_url = {http://cercor.oxfordjournals.org/content/21/4/920.full.pdf+html}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1093/cercor/bhq161}, author = {Werner, S and Noppeney, U} } @Article { 6313, title = {Audiovisual asynchrony detection in human speech}, journal = {Journal of Experimental Psychology: Human Perception and Performance}, year = {2011}, month = {2}, volume = {37}, number = {1}, pages = {245-256}, abstract = {Combining information from the visual and auditory senses can greatly enhance intelligibility of natural speech. Integration of audiovisual speech signals is robust even when temporal offsets are present between the component signals. In the present study, we characterized the temporal integration window for speech and nonspeech stimuli with similar spectrotemporal structure to investigate to what extent humans have adapted to the specific characteristics of natural audiovisual speech. We manipulated spectrotemporal structure of the auditory signal, stimulus length, and task context. Results indicate that the temporal integration window is narrower and more asymmetric for speech than for nonspeech signals. When perceiving audiovisual speech, subjects tolerate visual leading asynchronies, but are nevertheless very sensitive to auditory leading asynchronies that are less likely to occur in natural speech. Thus, speech perception may be fine-tuned to the natural statistics of audiovisual speech, where facial movements always occur before acoustic speech articulation.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Ernst}, department3 = {Research Group Noppeney}, web_url = {http://psycnet.apa.org/psycarticles/2010-17527-001.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1037/a0019952}, author = {Maier, JX and Di Luca, M and Noppeney, U} } @Article { 6511, title = {Prior auditory information shapes visual category-selectivity in ventral occipito-temporal cortex}, journal = {NeuroImage}, year = {2010}, month = {10}, volume = {52}, number = {4}, pages = {1592-1602}, abstract = {Objects in our natural environment generate signals in multiple sensory modalities. This fMRI study investigated the influence of prior task-irrelevant auditory information on visually-evoked category-selective activations in the ventral occipito-temporal cortex. Subjects categorized pictures as landmarks or animal faces, while ignoring the preceding congruent or incongruent sound. Behaviorally, subjects responded slower to incongruent than congruent stimuli. At the neural level, the lateral and medial prefrontal cortices showed increased activations for incongruent relative to congruent stimuli consistent with their role in response selection. In contrast, the parahippocampal gyri combined visual and auditory information additively: activation was greater for visual landmarks than animal faces and landmark-related sounds than animal vocalizations resulting in increased parahippocampal selectivity for congruent audiovisual landmarks. 
Effective connectivity analyses showed that this amplification of visual landmark-selectivity was mediated by increased negative coupling of the parahippocampal gyrus with the superior temporal sulcus for congruent stimuli. Thus, task-irrelevant auditory information influences visual object categorization at two stages. In the ventral occipito-temporal cortex, auditory and visual category information are combined additively to sharpen visual category-selective responses. In the left inferior frontal sulcus, as indexed by a significant incongruency effect, visual and auditory category information are integrated interactively for response selection.}, department = {Research Group Noppeney}, web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6WNP-501FPKM-5-N\&_cdi=6968\&_user=29041\&_pii=S1053811910007056\&_orig=search\&_coverDate=05\%2F07\%2F2010\&_sk=999999999\&view=c\&wchp=dGLbVtz-zSkWb\&md5=9500841a0f1e7}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1016/j.neuroimage.2010.05.002}, author = {Adam, R and Noppeney, U} } @Article { 6780, title = {Audiovisual interactions in binocular rivalry}, journal = {Journal of Vision}, year = {2010}, month = {8}, volume = {10}, number = {10:27}, pages = {1-15}, abstract = {When the two eyes are presented with dissimilar images, human observers report alternating percepts—a phenomenon coined binocular rivalry. These perceptual fluctuations reflect competition between the two visual inputs both at monocular and binocular processing stages. Here we investigated the influence of auditory stimulation on the temporal dynamics of binocular rivalry. In three psychophysics experiments, we investigated whether sounds that provide directionally congruent, incongruent, or non-motion information modulate the dominance periods of rivaling visual motion percepts. Visual stimuli were dichoptically presented random-dot kinematograms (RDKs) at different levels of motion coherence. The results show that directional motion sounds rather than auditory input per se influenced the temporal dynamics of binocular rivalry. In all experiments, motion sounds prolonged the dominance periods of the directionally congruent visual motion percept. In contrast, motion sounds abbreviated the suppression periods of the directionally congruent visual motion percepts only when they competed with directionally incongruent percepts. Therefore, analogous to visual contextual effects, auditory motion interacted primarily with consciously perceived visual input rather than visual input suppressed from awareness. Our findings suggest that auditory modulation of perceptual dominance times might be established in a top-down fashion by means of feedback mechanisms.}, department = {Department B{\"u}lthoff}, department2 = {Department Logothetis}, department3 = {Research Group Noppeney}, web_url = {http://www.journalofvision.org/content/10/10/27.full.pdf+html}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1167/10.10.27}, author = {Conrad, V and Bartels, A and Kleiner, M and Noppeney, U} } @Article { 6117, title = {Superadditive Responses in Superior Temporal Sulcus Predict Audiovisual Benefits in Object Categorization}, journal = {Cerebral Cortex}, year = {2010}, month = {8}, volume = {20}, number = {8}, pages = {1829-1842}, abstract = {Merging information from multiple senses provides a more reliable percept of our environment.
Yet, little is known about where and how various sensory features are combined within the cortical hierarchy. Combining functional magnetic resonance imaging and psychophysics, we investigated the neural mechanisms underlying integration of audiovisual object features. Subjects categorized or passively perceived audiovisual object stimuli with the informativeness (i.e., degradation) of the auditory and visual modalities being manipulated factorially. Controlling for low-level integration processes, we show higher level audiovisual integration selectively in the superior temporal sulci (STS) bilaterally. The multisensory interactions were primarily subadditive and even suppressive for intact stimuli but turned into additive effects for degraded stimuli. Consistent with the inverse effectiveness principle, auditory and visual informativeness determine the profile of audiovisual integration in STS similarly to the influence of physical stimulus intensity in the superior colliculus. Importantly, when holding stimulus degradation constant, subjects’ audiovisual behavioral benefit predicts their multisensory integration profile in STS: only subjects that benefit from multisensory integration exhibit superadditive interactions, while those that do not benefit show suppressive interactions. In conclusion, superadditive and subadditive integration profiles in STS are functionally relevant and related to behavioral indices of multisensory integration with superadditive interactions mediating successful audiovisual object categorization.}, url = {http://www.kyb.tuebingen.mpg.de/fileadmin/user_upload/files/publications/Werner2009_SuperadditiveResponsesInSTSpredictAudiovisualBenefits_6117[0].pdf}, department = {Research Group Noppeney}, web_url = {http://cercor.oxfordjournals.org/cgi/reprint/bhp248v1}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1093/cercor/bhp248}, author = {Werner, S and Noppeney, U} } @Article { 6593, title = {Perceptual Decisions Formed by Accumulation of Audiovisual Evidence in Prefrontal Cortex}, journal = {Journal of Neuroscience}, year = {2010}, month = {5}, volume = {30}, number = {21}, pages = {7434-7446}, abstract = {To form perceptual decisions in our multisensory environment, the brain needs to integrate sensory information derived from a common source and segregate information emanating from different sources. Combining fMRI and psychophysics in humans, we investigated how the brain accumulates sensory evidence about a visual source in the context of congruent or conflicting auditory information. In a visual selective attention paradigm, subjects (12 females, 7 males) categorized video clips while ignoring concurrent congruent or incongruent soundtracks. Visual and auditory information were reliable or unreliable. Our behavioral data accorded with accumulator models of perceptual decision making, where sensory information is integrated over time until a criterion amount of information is obtained. Behaviorally, subjects exhibited audiovisual incongruency effects that increased with the variance of the visual and the reliability of the interfering auditory input. At the neural level, only the left inferior frontal sulcus (IFS) showed an ''audiovisual-accumulator'' profile consistent with the observed reaction time pattern. By contrast, responses in the right fusiform were amplified by incongruent auditory input regardless of sensory reliability.
Dynamic causal modeling showed that these incongruency effects were mediated via connections from auditory cortex. Further, while the fusiform interacted with IFS in an excitatory recurrent loop that was strengthened for unreliable task-relevant visual input, the IFS did not amplify and even inhibited superior temporal activations for unreliable auditory input. To form decisions that guide behavioral responses, the IFS may accumulate audiovisual evidence by dynamically weighting its connectivity to auditory and visual regions according to sensory reliability and decisional relevance.}, department = {Research Group Noppeney}, web_url = {http://www.jneurosci.org/cgi/reprint/30/21/7434}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1523/JNEUROSCI.0455-10.2010}, author = {Noppeney, U and Ostwald, D and Werner, S} } @Article { 6238, title = {Distinct Functional Contributions of Primary Sensory and Association Areas to Audiovisual Integration in Object Categorization}, journal = {Journal of Neuroscience}, year = {2010}, month = {2}, volume = {30}, number = {7}, pages = {2662-2675}, abstract = {Multisensory interactions have been demonstrated in a distributed neural system encompassing primary sensory and higher-order association areas. However, their distinct functional roles in multisensory integration remain unclear. This fMRI study dissociated the functional contributions of three cortical levels to multisensory integration in object categorization. Subjects actively categorized or passively perceived noisy auditory and visual signals emanating from everyday actions with objects. The experiment included two 2x2 factorial designs that manipulated either (i) the presence/absence or (ii) the informativeness of the sensory inputs. These experimental manipulations revealed three patterns of audiovisual interactions. (1) In primary auditory cortices (PAC), a concurrent visual input increased the stimulus salience by amplifying the auditory response irrespective of task-context. Effective connectivity analyses demonstrated that this automatic response amplification is mediated via both direct and indirect (via STS) connectivity to visual cortices. (2) In superior temporal (STS) and intraparietal (IPS) sulci, audiovisual interactions sustained the integration of higher-order object features and predicted subjects’ audiovisual benefits in object categorization. (3) In the left ventrolateral prefrontal cortex (vlPFC), explicit semantic categorization resulted in suppressive audiovisual interactions as an index for multisensory facilitation of semantic retrieval and response selection. In conclusion, multisensory integration emerges at multiple processing stages within the cortical hierarchy. The distinct profiles of audiovisual interactions dissociate audiovisual salience effects in PAC, formation of object representations in STS/IPS and audiovisual facilitation of semantic categorization in vlPFC.
Furthermore, in STS/IPS, the profiles of audiovisual interactions were behaviorally relevant and predicted subjects’ multisensory benefits in performance accuracy.}, url = {http://www.kyb.tuebingen.mpg.de/fileadmin/user_upload/files/publications/Werner2010_JoNsc_6238[0].pdf}, department = {Research Group Noppeney}, web_url = {http://www.jneurosci.org/cgi/reprint/30/7/2662}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1523/JNEUROSCI.5091-09.2010}, author = {Werner, S and Noppeney, U} } @Article { 5775, title = {The sensory-motor theory of semantics: Evidence from functional imaging}, journal = {Language and Cognition}, year = {2009}, month = {10}, volume = {1}, number = {2}, pages = {249-276}, abstract = {This review discusses the contributions of functional imaging (fMRI/PET) to our understanding of how semantic concepts are represented and processed in the human brain. The sensory-motor theory of semantic memory suggests that semantic processing relies on reactivation of sensory-motor representations that were involved in perception and action. More specifically, it attributes an apparent category-specific (e.g. tool vs. animals) organization of semantics to anatomical segregation for different semantic features (e.g. action vs. visual). Within this framework, we will review functional imaging evidence that semantic processing of tools and actions may rely on activations within the visuo-motor system.}, department = {Research Group Noppeney}, web_url = {http://www.reference-global.com/doi/pdf/10.1515/LANGCOG.2009.012}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1515/LANGCOG.2009.012}, author = {Noppeney, U} } @Article { 5912, title = {Natural, Metaphoric, and Linguistic Auditory Direction Signals Have Distinct Influences on Visual Motion Processing}, journal = {Journal of Neuroscience}, year = {2009}, month = {5}, volume = {29}, number = {20}, pages = {6490-6499}, abstract = {To interact with our dynamic environment, the brain merges motion information from auditory and visual senses. However, not only ''natural'' auditory MOTION, but also ''metaphoric'' de/ascending PITCH and SPEECH (e.g., ''left/right''), influence the visual motion percept. Here, we systematically investigate whether these three classes of direction signals influence visual motion perception through shared or distinct neural mechanisms. In a visual-selective attention paradigm, subjects discriminated the direction of visual motion at several levels of reliability, with an irrelevant auditory stimulus being congruent, absent, or incongruent. Although the natural, metaphoric, and linguistic auditory signals were equally long and adjusted to induce a comparable directional bias on the motion percept, they influenced visual motion processing at different levels of the cortical hierarchy. A significant audiovisual interaction was revealed for MOTION in left human motion complex (hMT+/V5+) and for SPEECH in right intraparietal sulcus. In fact, the audiovisual interaction gradually decreased in left hMT+/V5+ for MOTION > PITCH > SPEECH and in right intraparietal sulcus for SPEECH > PITCH > MOTION. 
In conclusion, natural motion signals are integrated in audiovisual motion areas, whereas the influence of culturally learnt signals emerges primarily in higher-level convergence regions.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://www.jneurosci.org/cgi/reprint/29/20/6490}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1523/JNEUROSCI.5437-08.2009}, author = {Sadaghiani, S and Maier, JX and Noppeney, U} } @Article { 5138, title = {The integration of higher order form and motion by the human brain}, journal = {NeuroImage}, year = {2008}, month = {10}, volume = {42}, number = {4}, pages = {1529-1536}, abstract = {Our experience with a dynamic environment has tuned our visual system to use form and motion as complementary sources of information for object recognition. To identify the neural systems involved in integrating form and motion information during dynamic object processing, we used an fMRI adaptation paradigm which factorially manipulated form and motion repetition. Observers were sequentially presented with pairs of rotating novel objects in which the form or rotation direction in depth could be repeated. They were required to discriminate either dimension of the second target object, while the first object served as a form or motion prime. At the behavioural level, observers were faster to recognize the target or discriminate its direction when primed by the same form. Importantly, this form priming effect was enhanced when prime and target objects rotated in the same direction. At the neural level, the two priming effects (i.e., the main effect of form repetition and the interaction between form and motion repetition) were associated with reduced activations in distinct brain regions. Bilateral lateral occipital regions exhibited reduced activation when form was repeated irrespective of rotation direction. In contrast, bilateral anterior fusiform and posterior middle temporal regions (overlapping with hMT+/V5) showed an adaptation effect that depended on both form and motion direction. Thus, the current results reveal a visual processing hierarchy with lateral occipito-temporal cortex representing an object’s 3D structure, and anterior fusiform and posterior middle temporal regions being involved in spatio-temporal integration of form and motion during dynamic object processing.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6WNP-4SH0Y0C-2-M\&_cdi=6968\&_user=29041\&_orig=search\&_coverDate=10\%2F01\%2F2008\&_sk=999579995\&view=c\&wchp=dGLbVtb-zSkWb\&md5=2afffb798d11cb801968184b420b9242\&ie=/sdarticle.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1016/j.neuroimage.2008.04.265}, author = {Sarkheil, P and Vuong, QC and B{\"u}lthoff, HH and Noppeney, U} } @Article { 5379, title = {Imaging seizure activity: A combined EEG/EMG-fMRI study in reading epilepsy}, journal = {Epilepsia}, year = {2008}, month = {8}, volume = {50}, number = {2}, pages = {256-264}, abstract = {Purpose: To characterize the spatial relationship between activations related to language-induced seizure activity, language processing, and motor control in patients with reading epilepsy.
Methods: We recorded and simultaneously monitored several physiological parameters [voice-recording, electromyography (EMG), electrocardiography (ECG), electroencephalography (EEG)] during blood oxygen level-dependent (BOLD) functional magnetic resonance imaging (fMRI) in nine patients with reading epilepsy. Individually tailored language paradigms were used to induce and record habitual seizures inside the MRI scanner. Voxel-based morphometry (VBM) was used for structural brain analysis. Reading-induced seizures occurred in six out of nine patients. Results: One patient experienced abundant orofacial reflex myocloni during silent reading in association with bilateral frontal or generalized epileptiform discharges. In a further five patients, symptoms were only elicited while reading aloud with self-indicated events. Consistent activation patterns in response to reading-induced myoclonic seizures were observed within left motor and premotor areas in five of these six patients, in the left striatum (n = 4), in mesiotemporal/limbic areas (n = 4), in Brodmann area 47 (n = 3), and thalamus (n = 2). These BOLD activations were overlapping or adjacent to areas physiologically activated during language and facial motor tasks. No subtle structural abnormalities common to all patients were identified using VBM, but one patient had a left temporal ischemic lesion. Discussion: Based on the findings, we hypothesize that reflex seizures occur in reading epilepsy when a critical mass of neurons are activated through a provoking stimulus within corticoreticular and corticocortical circuitry subserving normal functions.}, department = {Research Group Noppeney}, web_url = {http://www3.interscience.wiley.com/cgi-bin/fulltext/121383967/PDFSTART}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1111/j.1528-1167.2008.01737.x}, author = {Salek-Haddadi, A and Mayer, T and Hamandi, K and Symms, M and Josephs, O and Fluegel, D and Woermann, F and Richardson, MP and Noppeney, U and Wolf, P and Koepp, MJ} } @Article { 4610, title = {Selective activation around the left occipito-temporal sulcus for words relative to pictures: individual variability or false positives?}, journal = {Human Brain Mapping}, year = {2008}, month = {8}, volume = {29}, number = {8}, pages = {986-1000}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://www3.interscience.wiley.com/cgi-bin/fulltext/115806086/PDFSTART}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1002/hbm.20443}, author = {Wright, ND and Mechelli, A and Noppeney, U and Veltman, DJ and Rombouts, SARB and Glensman, J and Haynes, J-D and Price, CJ} } @Article { 5174, title = {The neural systems of tool and action semantics: A perspective from functional imaging}, journal = {Journal of Physiology - Paris}, year = {2008}, month = {5}, volume = {102}, number = {1-3}, pages = {40-49}, abstract = {This review discusses the contributions of functional imaging (fMRI/PET) to our understanding of how action and tool concepts are represented and processed in the human brain. Category-selective deficits in neuropsychological patients have suggested a fine-grained functional specialization within the neural systems of semantics. However, the underlying principles of semantic organization remain controversial. The feature-based account of semantic memory (or ‘sensory-motor theory’) predicates category-selective effects (e.g. 
tool vs. animals) on anatomical segregation for different semantic features (e.g. action vs. visual). Within this framework, we will review functional imaging evidence that semantic processing of tools and actions may rely on activations within the visuo-motor system.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6VMC-4S7JFVY-1-7\&_cdi=6147\&_user=29041\&_orig=search\&_coverDate=05\%2F31\%2F2008\&_sk=998979998\&view=c\&wchp=dGLbVzW-zSkzS\&md5=840b72da18aaa4abd82963b14c7ee9ae\&ie=/sdarticle.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1016/j.jphysparis.2008.03.009}, author = {Noppeney, U} } @Article { 4600, title = {The effect of prior visual information on recognition of speech and sounds}, journal = {Cerebral Cortex}, year = {2008}, month = {3}, volume = {18}, number = {3}, pages = {598-609}, abstract = {To identify and categorize complex stimuli such as familiar objects or speech, the human brain integrates information that is abstracted at multiple levels from its sensory inputs. Using cross-modal priming for spoken words and sounds, this functional magnetic resonance imaging study identified 3 distinct classes of visuoauditory incongruency effects: visuoauditory incongruency effects were selective for 1) spoken words in the left superior temporal sulcus (STS), 2) environmental sounds in the left angular gyrus (AG), and 3) both words and sounds in the lateral and medial prefrontal cortices (IFS/mPFC). From a cognitive perspective, these incongruency effects suggest that prior visual information influences the neural processes underlying speech and sound recognition at multiple levels, with the STS being involved in phonological, AG in semantic, and mPFC/IFS in higher conceptual processing. In terms of neural mechanisms, effective connectivity analyses (dynamic causal modeling) suggest that these incongruency effects may emerge via greater bottom-up effects from early auditory regions to intermediate multisensory integration areas (i.e., STS and AG). This is consistent with a predictive coding perspective on hierarchical Bayesian inference in the cortex where the domain of the prediction error (phonological vs. semantic) determines its regional expression (middle temporal gyrus/STS vs. AG/intraparietal sulcus).}, url = {http://www.kyb.tuebingen.mpg.de/fileadmin/user_upload/files/publications/bhm091v1_noppeney_[0].pdf}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://cercor.oxfordjournals.org/cgi/reprint/bhm091v1?maxtoshow=\&HITS=10\&hits=10\&RESULTFORMAT=1\&author1=noppeney\&andorexacttitle=and\&andorexacttitleabs=and\&andorexactfulltext=and\&searchid=1\&a}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1093/cercor/bhm091}, author = {Noppeney, U and Josephs, O and Hocking, J and Price, CJ and Friston, KJ} } @Article { 4473, title = {The effects of visual deprivation on functional and structural organization of the human brain}, journal = {Neuroscience and Biobehavioral Reviews}, year = {2007}, month = {5}, volume = {31}, number = {8}, pages = {1169-1180}, abstract = {Early onset blindness allows one to investigate how the human brain adapts to sensory experience in infancy and early childhood.
Over the past decade, lesion, functional and structural imaging studies have accumulated evidence that severe perturbations to visual experience alter the functional and structural organization of the human brain. Visual deprivation can induce plastic changes not only in the visual system, but also in the remaining intact sensory–motor system, secondary to altered experience using these spared modalities. In particular, occipital, usually visual, areas are reorganized and recruited by the remaining senses and higher cognitive tasks primarily through cortico-cortical connectivity. Importantly, these plastic changes vary as a function of timing and are most pronounced in early onset blindness. Thus, sensory experience shapes functional and structural brain organization during sensitive periods in neurodevelopment.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6T0J-4NR18D1-1-1\&_cdi=4864\&_user=29041\&_orig=search\&_coverDate=05\%2F13\%2F2007\&_sk=999999999\&view=c\&wchp=dGLzVlz-zSkzS\&md5=270478a8e6d61d1e25db697f0488522a\&ie=}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1016/j.neubiorev.2007.04.012}, author = {Noppeney, U} } @Article { 4232, title = {Temporal lobe lesions and semantic impairment: A comparison of Herpes Simplex Virus Encephalitis and Semantic Dementia}, journal = {Brain}, year = {2007}, month = {1}, volume = {130}, number = {4}, pages = {1138-1147}, abstract = {Both herpes simplex virus encephalitis (HSVE) and semantic dementia (SD) typically affect anterior temporal lobe structures. Using voxel-based morphometry (VBM), this study compared the structural damage in four HSVE patients having a semantic deficit particularly affecting knowledge of living things and six SD patients with semantic impairment across all categories tested. Each patient was assessed relative to a group of control subjects. In both patient groups, left anterior temporal damage extended into the amygdala. In patients with HSVE, extensive grey matter loss was observed predominantly in the medial parts of the anterior temporal cortices bilaterally; in SD patients, the abnormalities extended more laterally and posteriorly in either the left, right or both temporal lobes. Based on a lesion deficit rationale and converging results from several other sources of evidence, we suggest that (i) antero-medial temporal cortex may be important for processing and differentiating between concepts that are ‘tightly packed’ in semantic space, such as living things, whereas (ii) inferolateral temporal cortex may play a more general role within the semantic system.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://brain.oxfordjournals.org/cgi/reprint/130/4/1138}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1093/brain/awl344}, author = {Noppeney, U and Patterson, K and Tyler, LK and Moss, H and Stamatakis, EA and Bright, P and Mummery, C and Price, CJ} } @Article { 4082, title = {Language control in the bilingual brain}, journal = {Science}, year = {2006}, month = {6}, volume = {312}, number = {5779}, pages = {1537-1540}, abstract = {How does the bilingual brain distinguish and control which language is in use?
Previous functional imaging experiments have not been able to answer this question because proficient bilinguals activate the same brain regions irrespective of the language being tested. Here, we reveal that neuronal responses within the left caudate are sensitive to changes in the language or the meaning of words. By demonstrating this effect in populations of German-English and Japanese-English bilinguals, we suggest that the left caudate plays a universal role in monitoring and controlling the language in use.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://www.sciencemag.org/cgi/reprint/312/5779/1537.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1126/science.1127761}, author = {Crinion, J and Turner, R and Grogan, A and Hanakawa, T and Noppeney, U and Devlin, J and Aso, T and Urayama, S and Fukuyama, H and Stockton, K and Usui, K and Green, DW and Price, CJ} } @Article { 4054, title = {Hemispheric asymmetries in language-related pathways: A combined functional MRI and tractography study}, journal = {Neuroimage}, year = {2006}, month = {5}, volume = {32}, number = {1}, pages = {388-399}, abstract = {Functional lateralization is a feature of human brain function, most apparent in the typical left-hemisphere specialization for language. A number of anatomical and imaging studies have examined whether structural asymmetries underlie this functional lateralization. We combined functional MRI (fMRI) and diffusion-weighted imaging (DWI) with tractography to study 10 healthy right-handed subjects. Three language fMRI paradigms were used to define language-related regions in inferior frontal and superior temporal regions. A probabilistic tractography technique was then employed to delineate the connections of these functionally defined regions. We demonstrated consistent connections between Broca’s and Wernicke’s areas along the superior longitudinal fasciculus bilaterally but more extensive fronto-temporal connectivity on the left than the right. Both tract volumes and mean fractional anisotropy (FA) were significantly greater on the left than the right. We also demonstrated a correlation between measures of structure and function, with subjects with more lateralized fMRI activation having a more highly lateralized mean FA of their connections. These structural asymmetries are in keeping with the lateralization of language function and indicate the major structural connections underlying this function.}, web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6WNP-4JVTCDN-5-2\&_cdi=6968\&_user=29041\&_orig=search\&_coverDate=08\%2F01\%2F2006\&_sk=999679998\&view=c\&wchp=dGLzVzz-zSkWz\&md5=6b9f558390f1266b02da4419de73034b\&ie=}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1016/j.neuroimage.2006.03.011}, author = {Powell, HW and Parker, GJ and Alexander, D and Symms, MR and Boulby, PA and Wheeler-Kingshott, CA and Noppeney, U and Barker, GJ and Koepp, MJ and Duncan, JS} } @Article { 3689, title = {Identification of degenerate neuronal systems based on intersubject variability}, journal = {Neuroimage}, year = {2006}, month = {4}, volume = {30}, number = {3}, pages = {885-890}, abstract = {Group studies implicitly assume that all subjects activate one common system to sustain a particular cognitive task. Intersubject variability is generally treated as well-behaved and uninteresting noise.
However, intersubject variability might result from subjects engaging different degenerate neuronal systems that are each sufficient for task performance. This would produce a multimodal distribution of intersubject variability. We have explored this idea with the help of Gaussian Mixture Modeling and Bayesian model comparison procedures. We illustrate our approach using a crossmodal priming paradigm, in which subjects perform a semantic decision on environmental sounds or their spoken names that were preceded by a semantically congruent or incongruent picture or written name. All subjects consistently activated the superior temporal gyri bilaterally, the left fusiform gyrus and the inferior frontal sulcus. Comparing a One and Two Gaussian Mixture Model of the unexplained residuals provided very strong evidence for two groups with distinct activation patterns: 6 subjects exhibited additional activations in the superior temporal sulci bilaterally, the right superior frontal and central sulcus. 11 subjects showed increased activation in the striate and the right inferior parietal cortex. These results suggest that semantic decisions on auditory–visual compound stimuli might be accomplished by two overlapping degenerate neuronal systems.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6WNP-4HM7S0B-7-1\&_cdi=6968\&_user=29041\&_orig=browse\&_coverDate=04\%2F15\%2F2006\&_sk=999699996\&view=c\&wchp=dGLzVzz-zSkWW\&md5=a71a}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1016/j.neuroimage.2005.10.010}, author = {Noppeney, U and Penny, WD and Price, CJ and Flandin, G and Friston, KJ} } @Article { 3839, title = {Two approaches to repetition suppression}, journal = {Human Brain Mapping}, year = {2006}, month = {3}, volume = {27}, number = {5}, pages = {411-416}, abstract = {Repetition suppression refers to the phenomenon that prior processing of stimuli (or stimulus attributes) decreases activation elicited by processing subsequent stimuli with identical attributes. We present two complementary approaches to identify regions that show repetition suppression for subsequent sentences with either identical: (1) sentence forms or (2) speakers. The first categorical approach simply compares sentences that are presented in Same and Different blocks. The second factorial approach operationally defines repetition suppression as decreased activation for the subsequent Same stimulus relative to its preceding sentence. To account for nonspecific time confounds, this approach tests for a repetition \(\times\) condition (Same or Different) interaction. Surprisingly, the two approaches revealed different results: Only the categorical analysis detected sentence repetition effects in multiple regions within a bilateral frontotemporal system that has previously been implicated in sentence processing. These discrepancies might be due to the different efficiencies with which the particular contrasts were estimated or spurious differences in stimuli or attentional set that could not be entirely controlled within a single subject.
Finally, we combined the two approaches in a [global null] conjunction analysis.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://www3.interscience.wiley.com/cgi-bin/fulltext/112550800/PDFSTART}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1002/hbm.20242}, author = {Noppeney, U and Penny, WD} } @Article { 3683, title = {How reading differs from object naming at the neuronal level}, journal = {Neuroimage}, year = {2006}, month = {1}, volume = {29}, number = {2}, pages = {643-648}, abstract = {This paper uses whole brain functional neuroimaging in neurologically normal participants to explore how reading aloud differs from object naming in terms of neuronal implementation. In the first experiment, we directly compared brain activation during reading aloud and object naming. This revealed greater activation for reading in bilateral premotor, left posterior superior temporal and precuneus regions. In a second experiment, we segregated the object-naming system into object recognition and speech production areas by factorially manipulating the presence or absence of objects (pictures of objects or their meaningless scrambled counterparts) with the presence or absence of speech production (vocal vs. finger press responses). This demonstrated that the areas associated with speech production (object naming and repetitively saying “OK” to meaningless scrambled pictures) corresponded exactly to the areas where responses were higher for reading aloud than object naming in Experiment 1. Collectively the results suggest that, relative to object naming, reading increases the demands on shared speech production processes. At a cognitive level, enhanced activation for reading in speech production areas may reflect the multiple and competing phonological codes that are generated from the sublexical parts of written words. At a neuronal level, it may reflect differences in the speed with which different areas are activated and integrate with one another.}, web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6WNP-4H0S1G9-3-3\&_cdi=6968\&_user=29041\&_orig=search\&_coverDate=01\%2F15\%2F2006\&_sk=999709997\&view=c\&wchp=dGLbVtz-zSkWz\&md5=8d3cb64bc2940d4b3fbc21b46347e170\&ie=/sdarticle.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1016/j.neuroimage.2005.07.044}, author = {Price, CJ and McCrory, E and Noppeney, U and Mechelli, A and Moore, CJ and Biggio, N and Devlin, JT} } @Article { 3700, title = {Action selectivity in parietal and temporal cortex}, journal = {Cognitive Brain Research}, year = {2005}, month = {10}, volume = {25}, number = {3}, pages = {641-649}, abstract = {The sensory-action theory proposes that the neural substrates underlying action representations are related to a visuomotor action system encompassing the left ventral premotor cortex, the anterior intraparietal (AIP) and left posterior middle temporal gyrus (LPMT). Using fMRI, we demonstrate that semantic decisions on action, relative to non-action words, increased activation in the left AIP and LPMT irrespective of whether the words were presented in a written or spoken form. Left AIP and LPMT might thus play the role of amodal semantic regions that can be activated via auditory as well as visual input.
Left AIP and LPMT did not distinguish between different types of actions such as hand actions and whole body movements, although a right STS region responded selectively to whole body movements.}, web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6SYV-4HCDJPK-2-1\&_cdi=4844\&_user=29041\&_orig=search\&_coverDate=12\%2F31\%2F2005\&_sk=999749996\&view=c\&wchp=dGLzVzz-zSkzk\&md5=4f489e37b5b19e80aafc6d868be9f80b\&ie=/sdarticle.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1016/j.cogbrainres.2005.08.017}, author = {Noppeney, U and Josephs, O and Kiebel, S and Friston, KJ and Price, CJ} } @Article { 3692, title = {Early visual deprivation induces structural plasticity in gray and white matter}, journal = {Current Biology}, year = {2005}, month = {7}, volume = {15}, number = {13}, pages = {R488-R490}, web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6VRT-4GKWD99-8-5\&_cdi=6243\&_user=29041\&_orig=browse\&_coverDate=07\%2F12\%2F2005\&_sk=999849986\&view=c\&wchp=dGLbVzb-zSkzS\&md5=edf3541e4f5e4d2a402cc5a4e1c224ce\&ie=/sdarticle.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1016/j.cub.2005.06.053}, author = {Noppeney, U and Friston, KJ and Ashburner, J and Frackowiak, R and Price, CJ} } @Article { 3686, title = {Reading skills after left anterior temporal lobe resection: an fMRI study.}, journal = {Brain}, year = {2005}, month = {6}, volume = {128}, number = {6}, pages = {1377-1385}, web_url = {http://intl-brain.oxfordjournals.org/cgi/reprint/128/6/1377}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1093/brain/awh414}, author = {Noppeney, U and Price, CJ and Duncan, JS and Koepp, MJ} } @Article { 3685, title = {Two Distinct Neural Mechanisms for Category-selective Responses}, journal = {Cerebral Cortex}, year = {2005}, month = {6}, volume = {16}, number = {3}, pages = {437-445}, abstract = {The cognitive and neural mechanisms mediating category-selective responses in the human brain remain controversial. Using functional magnetic resonance imaging and effective connectivity analyses (Dynamic Causal Modelling), we investigated animal- and tool-selective responses by manipulating stimulus modality (pictures versus words) and task (implicit versus explicit semantic). We dissociated two distinct mechanisms that engender category selectivity: in the ventral occipito-temporal cortex, tool-selective responses were observed irrespective of task, greater for pictures and mediated by bottom-up effects. In a left temporo-parietal action system, tool-selective responses were observed irrespective of modality, greater for explicit semantic tasks and mediated by top-down modulation from the left prefrontal cortex. These distinct activation and connectivity patterns suggest that the two systems support different cognitive operations, with the ventral occipito-temporal regions engaged in structural processing and the dorsal visuo-motor system in strategic semantic processing. Consistent with current semantic theories, explicit semantic processing of tools might thus rely on reactivating their associated action representations via top-down modulation.
In terms of neuronal mechanisms, the category selectivity may be mediated by distinct top-down (task-dependent) and bottom-up (stimulus-dependent) mechanisms.}, web_url = {http://cercor.oxfordjournals.org/cgi/reprint/16/3/437}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1093/cercor/bhi123}, author = {Noppeney, U and Price, CJ and Penny, WD and Friston, KJ} } @Article { 3691, title = {Degenerate neuronal systems sustaining cognitive functions}, journal = {Journal of Anatomy}, year = {2004}, month = {12}, volume = {205}, number = {6}, pages = {433-442}, abstract = {The remarkable resilience of cognitive functions to focal brain damage suggests that multiple degenerate neuronal systems can sustain the same function either via similar mechanisms or by implementing different cognitive strategies. In degenerate functional neuroanatomy, multiple degenerate neuronal systems might be present in a single brain where they are either co-activated or remain latent during task performance. In degeneracy over subjects, a particular function may be sustained by only one neuronal system within a subject, but by different systems over subjects. Degeneracy over subjects might have arisen from (ab)normal variation in neurodevelopmental trajectories or long-term plastic changes following structural lesions. We discuss how degenerate neuronal systems can be revealed using (1) intersubject variability, (2) multiple lesion studies and (3) an iterative approach integrating information from lesion and functional imaging studies.}, web_url = {http://www.blackwell-synergy.com/doi/pdf/10.1111/j.0021-8782.2004.00343.x}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1111/j.0021-8782.2004.00343.x}, author = {Noppeney, U and Friston, KJ and Price, CJ} } @Article { 3679, title = {Kommentare zu B. R{\"o}der und F. R{\"o}sler: Kompensatorische Plastizit{\"a}t bei blinden Menschen}, journal = {Zeitschrift f{\"u}r Neuropsychologie}, year = {2004}, month = {10}, volume = {15}, number = {4}, pages = {272-273}, web_url = {http://psycontent.metapress.com/content/vt8228661026367v/?p=79f7954e61004d669a0a94cfcf6a6608\&pi=3}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {de}, DOI = {10.1024/1016-264X.15.4.272}, author = {Noppeney, U} } @Article { 3658, title = {Neurolinguistics: structural plasticity in the bilingual brain.}, journal = {Nature}, year = {2004}, month = {10}, volume = {431}, number = {7010}, pages = {757-757}, abstract = {Humans have a unique ability to learn more than one language, a skill that is thought to be mediated by functional (rather than structural) plastic changes in the brain. Here we show that learning a second language increases the density of grey matter in the left inferior parietal cortex and that the degree of structural reorganization in this region is modulated by the proficiency attained and the age at acquisition.
This relation between grey-matter density and performance may represent a general principle of brain organization.}, web_url = {http://www.nature.com/nature/journal/v431/n7010/pdf/431757a.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1038/431757a}, author = {Mechelli, A and Crinion, JT and Noppeney, U and Ashburner, J and Frackowiak, RS and Price, CJ} } @Article { 3697, title = {An FMRI study of syntactic adaptation}, journal = {Journal of Cognitive Neuroscience}, year = {2004}, month = {5}, volume = {16}, number = {4}, pages = {702-713}, abstract = {It is easier to produce and comprehend a series of sentences when they have similar syntactic structures. This ''syntactic priming'' effect was investigated during silent sentence reading using (i) blood oxygenation level-dependent (BOLD) response as a physiological measure in an fMRI study and (ii) reading time as a behavioral measure in a complementary self-paced reading paradigm. We found that reading time and left anterior temporal activation were decreased when subjects read sentences with similar relative to dissimilar syntactic forms. Thus, syntactic adaptation during sentence comprehension is demonstrated in a neural area that has previously been linked to both lexical semantic and sentence processing.}, web_url = {http://jocn.mitpress.org/cgi/reprint/16/4/702}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1162/089892904323057399}, author = {Noppeney, U and Price, CJ} } @Article { 3688, title = {Retrieval of abstract semantics}, journal = {Neuroimage}, year = {2004}, month = {5}, volume = {22}, number = {1}, pages = {164-170}, abstract = {Behavioural and neuropsychological evidence suggests that abstract and concrete concepts might be represented, retrieved and processed differently in the human brain. Using fMRI, we demonstrate that retrieval of abstract relative to sensory-based semantics during synonym judgements increased activation in a left frontotemporal system that has been associated with semantic processing particularly at the sentence level. Since activation increases were observed irrespective of the degree of difficulty, we suggest that these differential activations might reflect a particular retrieval mechanism or strategy for abstract concepts. In contrast to sensory-based semantics, the meaning of abstract concepts is largely specified by their usage in language rather than by their relations to the physical world. Subjects might therefore generate an appropriate semantic sentential context to fully explore and specify the meaning of abstract concepts.
Our results also explain why abstract semantics is vulnerable to left frontotemporal lesions.}, web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6WNP-4BWCBT1-3-3\&_cdi=6968\&_user=29041\&_orig=browse\&_coverDate=05\%2F31\%2F2004\&_sk=999779998\&view=c\&wchp=dGLbVtb-zSkzS\&md5=f928c2609d99db78c12d8806d9919f83\&ie=/sdarticle.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1016/j.neuroimage.2003.12.010}, author = {Noppeney, U and Price, CJ} } @Article { 3662, title = {The neural areas that control the retrieval and selection of semantics.}, journal = {Neuropsychologia}, year = {2004}, month = {3}, volume = {42}, number = {9}, pages = {1269-1280}, abstract = {Semantic retrieval consistently activates left inferior frontal regions, yet lesions to these areas do not typically result in semantic deficits. This discrepancy has led to the hypothesis that left prefrontal areas are primarily involved in executive processes while semantic information is retrieved from temporal cortices. We investigated semantic executive processing by changing, over trials, the semantic association to a stimulus. Using fMRI and PET, we demonstrate that changes in semantic association increased activation in temporal as well as frontal areas. The similar effects in temporal and frontal areas suggest that semantic executive processes are not confined to the left inferior frontal cortex but might be distributed throughout the semantic system.}, web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6T0D-4BYJV2C-2-1M\&_cdi=4860\&_user=29041\&_orig=browse\&_coverDate=12\%2F31\%2F2004\&_sk=999579990\&view=c\&wchp=dGLzVzz-zSkWz\&md5=5139136b37f22059320b1b90fd2c95ac\&ie=/sdarticle.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1016/j.neuropsychologia.2003.12.014}, author = {Noppeney, U and Phillips, J and Price, CJ} } @Article { 3681, title = {Normal and pathological reading: converging data from lesion and imaging studies.}, journal = {Neuroimage}, year = {2003}, month = {11}, volume = {20}, number = {Supplement 1}, pages = {S30-S41}, abstract = {In this paper we discuss cognitive and anatomical models of reading that have emerged from behavioral and lesion studies of dyslexia and functional neuroimaging studies of normal subjects. We then suggest that discrepancies in their findings can partly be overcome by functional neuroimaging studies of patients with acquired dyslexia. We present two such studies. One patient had a large left temporoparietal lesion which limited his reading to words with high semantic associations. When he read these words aloud, activation was observed in all areas of the normal reading system with the exception of the damaged left superior temporal lobe. The second patient had anterior temporal lobe atrophy with semantic dementia and a deficit in reading words that rely on lexical or semantic mediation. When asked to read aloud words on which she was likely to succeed, she activated all the normal areas, with increased activation in a left sensorimotor area associated with phonological processing and decreased activation in several areas associated with semantic processing. By relating these findings to those from lesion studies and imaging studies of normals, we propose that the translation of orthography to phonology is mediated semantically by the anterior part of the left midfusiform gyrus.
In contrast, when semantic processing is compromised, the translation of orthography to phonology will be more reliant on the posterior part of the left midfusiform and the left frontal areas associated with phonology. Future studies are required to examine the connectivity between these areas during normal and abnormal reading.}, web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6WNP-49V3GH4-1-H\&_cdi=6968\&_user=29041\&_orig=browse\&_coverDate=11\%2F30\%2F2003\&_sk=999799999.8998\&view=c\&wchp=dGLbVzz-zSkWA\&md5=06b8119a2bbf9f4da810b71deeab06e3\&ie=/sdarticle.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1016/j.neuroimage.2003.09.012}, author = {Price, CJ and Gorno-Tempini, ML and Graham, KS and Biggio, N and Mechelli, A and Patterson, K and Noppeney, U} } @Article { 3702, title = {A dynamic causal modeling study on category effects: bottom-up or top-down mediation?}, journal = {Journal of Cognitive Neuroscience}, year = {2003}, month = {10}, volume = {15}, number = {7}, pages = {925-934}, web_url = {http://jocn.mitpress.org/cgi/reprint/15/7/925}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1162/089892903770007317}, author = {Mechelli, A and Price, CJ and Noppeney, U and Friston, KJ} } @Article { 3682, title = {Cortical localisation of the visual and auditory word form areas: a reconsideration of the evidence.}, journal = {Brain and Language}, year = {2003}, month = {8}, volume = {86}, number = {2}, pages = {272-286}, abstract = {In this paper we examine the evidence for human brain areas dedicated to visual or auditory word form processing by comparing cortical activation for auditory word repetition, reading, picture naming, and environmental sound naming. Both reading and auditory word repetition activated left lateralised regions in the frontal operculum (Broca}, web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6WC0-47YH4PG-8-H\&_cdi=6724\&_user=29041\&_orig=browse\&_coverDate=08\%2F31\%2F2003\&_sk=999139997\&view=c\&wchp=dGLbVtb-zSkWz\&md5=b74fde36aa773d35e0305ba9f780c3ca\&ie=/sdarticle.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1016/S0093-934X(02)00544-8}, author = {Price, CJ and Winterburn, D and Giraud, AL and Moore, CJ and Noppeney, U} } @Article { 3670, title = {Effects of visual deprivation on the organization of the semantic system.}, journal = {Brain}, year = {2003}, month = {7}, volume = {126}, number = {7}, pages = {1620-1627}, abstract = {Early onset blindness provides a lesion model to investigate whether experience-dependent mechanisms subtend the functional anatomy of semantic retrieval. In particular, visual deprivation might alter the neural systems underlying retrieval of semantic information that is acquired via visual experience. 
Using functional MRI, we demonstrate that both early blind and sighted subjects activate a left-lateralized fronto-temporal}, web_url = {http://brain.oxfordjournals.org/cgi/reprint/126/7/1620}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1093/brain/awg152}, author = {Noppeney, U and Friston, KJ and Price, CJ} } @Article { 3713, title = {How is the fusiform gyrus related to category-specificity?}, journal = {Cognitive Neuropsychology}, year = {2003}, month = {5}, volume = {20}, number = {3-6}, pages = {561-574}, abstract = {There is growing evidence from functional imaging studies that distinct regions in the fusiform gyri are differentially sensitive to object category. In this paper, we investigate how the areas that are more sensitive to animals than tools respond to other visual and semantic variables. We illustrate that (1) category effects in the fusiform areas are stronger for pictures of objects than their written names; (2) retrieving information on the colour or size of objects activates a left lateralised fusiform area that lies anterior to the category-sensitive areas; and (3) both left and right category-sensitive areas respond strongly to visual feature detection on false fonts: meaningless visual stimuli with no semantic associations. These results dissociate the responses in two fusiform areas: The posterior category-sensitive areas are primarily modulated by visual input, whereas a more anterior polymodal region is involved in the retrieval of visual information. In addition, we demonstrate that the posterior areas which are more active for animals than tools are also more active for fruits than tools. Our data are therefore consistent with the proposal that activation in the lateral posterior fusiform gyri reflects the demands on structural differentiation. Since animals and fruits tend to have more structurally similar neighbours than man-made kinds of objects, category effects are likely to be observed during most picture identification tasks. In contrast, when the stimuli are written or auditory names, category effects may only be observed when the task requires access to fine spatial details in the objects’ structures.}, web_url = {http://www.informaworld.com/smpp/title\verb=~=content=t713659042}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1080/02643290244000284}, author = {Price, CJ and Noppeney, U and Phillips, JA and Devlin, JT} } @Article { 3687, title = {Functional imaging of the semantic system: retrieval of sensory-experienced and verbally learned knowledge}, journal = {Brain and Language}, year = {2003}, month = {1}, volume = {84}, number = {1}, pages = {120-133}, abstract = {This paper considers how functional neuro-imaging can be used to investigate the organization of the semantic system and the limitations associated with this technique. The majority of the functional imaging studies of the semantic system have looked for divisions by varying stimulus category. These studies have led to divergent results and no clear anatomical hypotheses have emerged to account for the dissociations seen in behavioral studies. Only a few functional imaging studies have used task as a variable to differentiate the neural correlates of semantic features more directly. We extend these findings by presenting a new study that contrasts tasks that differentially weight sensory (color and taste) and verbally learned (origin) semantic features.
Irrespective of the type of semantic feature retrieved, a common semantic system was activated as demonstrated in many previous studies. In addition, the retrieval of verbally learned, but not sensory-experienced, features enhanced activation in medial and lateral posterior parietal areas. We attribute these ''verbally learned'' effects to differences in retrieval strategy and conclude that evidence for segregation of semantic features at an anatomical level remains weak. We believe that functional imaging has the potential to increase our understanding of the neuronal infrastructure that sustains semantic processing but progress may require multiple experiments until a consistent explanatory framework emerges.}, web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6WC0-47MSJW1-4-F\&_cdi=6724\&_user=29041\&_orig=browse\&_coverDate=01\%2F31\%2F2003\&_sk=999159998\&view=c\&wchp=dGLbVzW-zSkzS\&md5=1628313f3c6db8a82be6a61f658e35c8\&ie=/sdarticle.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1016/S0093-934X(02)00525-4}, author = {Noppeney, U and Price, CJ} } @Article { 3684, title = {Can segregation within the semantic system account for category-specific deficits?}, journal = {Brain}, year = {2002}, month = {9}, volume = {125}, number = {9}, pages = {2067-2080}, abstract = {Functional neuroimaging was used to investigate the extent to which category-specific semantic deficits in patients can be accounted for in terms of the demands placed on neural systems underlying different types of semantic knowledge. Unlike previous functional imaging studies of category specificity, we used a factorial design that crossed category (tools and fruits) with tasks requiring retrieval of either action or perceptual (real life size) knowledge. The presentation of tools relative to fruit increased activation in the same left posterior middle temporal area that was linked to the retrieval of action knowledge in general (for fruit as well as tools). However, we found no correlation between activation evoked by fruit and the size retrieval task. The left medial anterior temporal cortex was the only region to be activated for fruit relative to tools. We argue that the sensory-functional theory of category-specific effects is insufficient to account for the current neuroimaging literature. However, the data do support a more refined version of the theory: tools, relative to fruit, are more strongly linked to manipulative/motor knowledge and, for some tasks, fruit may be more reliant on integrating multiple semantic features.}, web_url = {http://brain.oxfordjournals.org/cgi/reprint/125/9/2067}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, author = {Phillips, JA and Noppeney, U and Humphreys, GW and Price, CJ} } @Article { 3712, title = {The neural substrates of action retrieval: An examination of semantic and visual routes to action}, journal = {Visual Cognition}, year = {2002}, month = {5}, volume = {9}, number = {4-5}, pages = {662-685}, abstract = {We report three PET experiments that examine the neural substrates of the conceptual components of action retrieval. In all three experiments, subjects made action or screen-size decisions to familiar objects presented either as pictures or written words (the names of the objects). In Experiment 1, a third task was included, requiring a decision on the real-life size of the stimuli.
In Experiment 2, a third stimulus type was included, with action and size decisions also performed on pictures of meaningless novel objects. Finally, in Experiment 3, we changed the response mode from a button press to a more explicit movement made with a ''manipulandum''. Based on neuropsychological findings, we predicted that when action responses were made to pictures of familiar or novel objects, relative to words, there would be less activation in semantic regions but greater activation in visual, motor, and perhaps parietal cortices. We found that action relative to screen-size decisions on both pictures and words activated the left hemisphere temporo-frontal semantic system with activation in the left posterior middle temporal cortex specific to action retrieval (Experiment 1). In addition, action retrieval elicited more activation for (1) words than pictures in areas associated with semantics; and (2) novel objects than words or familiar objects in areas associated with pre-semantic object processing. These results are discussed in the context of semantic and visual routes to action retrieval.}, web_url = {http://www.informaworld.com/smpp/ftinterface\verb=~=content=a713756966\verb=~=fulltext=713240930}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1080/13506280143000610}, author = {Phillips, JA and Humphreys, GW and Noppeney, U and Price, CJ} } @Article { 3696, title = {A PET study of stimulus- and task-induced semantic processing}, journal = {Neuroimage}, year = {2002}, month = {4}, volume = {15}, number = {4}, pages = {927-935}, abstract = {To investigate the neural correlates of semantic processing, previous functional imaging studies have used semantic decision and generation tasks. However, in addition to activating semantic associations these tasks also involve executive functions that are not specific to semantics. The study reported in this paper aims to dissociate brain activity due to stimulus-driven semantic associations and task-induced semantic and executive processing by using repetition and semantic decision on auditorily presented words in a cognitive conjunction design. The left posterior inferior temporal, inferior frontal (BA 44/45), and medial orbital gyri were activated by both tasks, suggesting a general role in stimulus-driven semantic and phonological processing. In addition, semantic decision increased activation in (i) left ventral inferior frontal cortex (BA 47), right cerebellum, and paracingulate, which have all previously been implicated in executive functions, and (ii) a ventral region in the left anterior temporal pole which is commonly affected in patients with semantic impairments. We attribute activation in this area to the effortful linkage of semantic features. Thus, our study replicated the functional dissociation between dorsal and ventral regions of the left inferior frontal cortex.
Moreover, it also dissociated the semantic functions of the left posterior inferior temporal gyrus and anterior temporal pole: The posterior region subserves stimulus-driven activation of semantic associations and the left anterior region is involved in task-induced association of semantic information.}, web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6WNP-45FSGJF-K-1\&_cdi=6968\&_user=29041\&_orig=browse\&_coverDate=04\%2F30\%2F2002\&_sk=999849995\&view=c\&wchp=dGLbVtb-zSkWz\&md5=9f5850bc81492e3461aa2ff6887ffb9e\&ie=/sdarticle.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1006/nimg.2001.1015}, author = {Noppeney, U and Price, CJ} } @Article { 3695, title = {Retrieval of visual, auditory, and abstract semantics}, journal = {Neuroimage}, year = {2002}, month = {4}, volume = {15}, number = {4}, pages = {917-926}, abstract = {Conceptual knowledge is thought to be represented in a large distributed network, indexing a range of different semantic features (e.g., visual, auditory, functional). We investigated the anatomical organization of these features, using PET, by contrasting brain activity elicited by heard words with (i) visual (e.g., blue), (ii) auditory (e.g., noise), or (iii) abstract (e.g., truth) meaning. The activation task was either repetition or semantic decision (e.g., does the meaning of the word relate to religion?). In the baseline conditions, the sound track of the words was reversed and subjects had to say ''OK'' (control for repetition) or make an acoustic decision (control for semantic decision). Irrespective of task, words relative to their corresponding controls activated the left posterior inferior temporal and inferior frontal cortices. In addition, semantic decisions on words with sensory (visual and auditory) meanings enhanced activation in a ventral region of the left anterior temporal pole. These results are consistent with neuropsychological studies showing that anterior temporal lobe damage can cause deficits for items that are mainly defined by their sensory features (i.e., concrete, particularly living items). Since modality-specific activation was observed only during the semantic decision task, we discuss whether it reflects retrieval of sensory semantics per se or the degree to which semantic associations are triggered during effortful retrieval.}, web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6WNP-45FSGJF-J-1\&_cdi=6968\&_user=29041\&_orig=browse\&_coverDate=04\%2F30\%2F2002\&_sk=999849995\&view=c\&wchp=dGLbVlb-zSkWz\&md5=0c97352b6763c863f0f9b2e44cb88805\&ie=/sdarticle.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1006/nimg.2001.1016}, author = {Noppeney, U and Price, CJ} } @Article { 3704, title = {Anatomic constraints on cognitive theories of category specificity.}, journal = {Neuroimage}, year = {2002}, month = {3}, volume = {15}, number = {3}, pages = {675-685}, abstract = {Many cognitive theories of semantic organization stem from reports of patients with selective, category-specific deficits for particular classes of objects (e.g., fruit). The anatomical assumptions underlying the competing claims can be evaluated with functional neuroimaging but the findings to date have been inconsistent and insignificant when standard statistical criteria are adopted. We hypothesized that category differences in functional brain responses might be small and task dependent. 
To test this hypothesis, we entered data from seven PET studies into a single multifactorial design which crossed category (living vs man-made) with a range of tasks. Reliable category-specific effects were observed but only for word retrieval and semantic decision tasks. Living things activated medial aspects of the anterior temporal poles bilaterally while tools activated a left posterior middle temporal region. These category-by-task interactions provide robust evidence for an anatomical double dissociation according to category and place strong constraints on cognitive theories of the semantic system. Furthermore they reconcile some of the apparent inconsistencies between lesion studies and functional neuroimaging data.}, web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6WNP-457VFVM-R-1\&_cdi=6968\&_user=29041\&_orig=browse\&_coverDate=03\%2F31\%2F2002\&_sk=999849996\&view=c\&wchp=dGLbVlz-zSkWz\&md5=62e7e4968c3c13f54b920bbac28eace5\&ie=/sdarticle.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1006/nimg.2001.1002}, author = {Devlin, JT and Moore, CJ and Mummery, CJ and Gorno-Tempini, ML and Phillips, JA and Noppeney, U and Frackowiak, RSJ and Friston, KJ and Price, CJ} } @Article { 3701, title = {Kurt Goldstein: a philosophical scientist}, journal = {Journal of the History of the Neurosciences}, year = {2001}, month = {3}, volume = {10}, number = {1}, pages = {67-78}, abstract = {Kurt Goldstein was one of the major proponents of the holistic movement which was opposed to the classical connectionist Wernicke-Lichtheim-Model in aphasiology at the beginning of the 20th century. Influenced by Kant, Husserl and Cassirer, his work goes beyond purely empirical research and approaches traditionally philosophical questions: How can we proceed from empirical data to a theory adequately reflecting reality? How can we understand man as mind and body? What is the relation between language and cognition? Goldstein}, web_url = {http://www.informaworld.com/smpp/content\verb=~=db=all?content=10.1076/jhin.10.1.67.5626}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1076/jhin.10.1.67.5626}, author = {Noppeney, U} } @Article { 3693, title = {Language and cognition: Kurt Goldstein‘s Theory of Semantics}, journal = {Brain and Cognition}, year = {2000}, month = {12}, volume = {44}, number = {3}, pages = {367-386}, abstract = {Kurt Goldstein is regarded as one of the major proponents of the holistic movement at the beginning of the 20th century. He rejected the strong localization hypothesis in the field of aphasiology and attempted to link language disturbances to an underlying general intellectual impairment. Goldstein‘s criticism was based on his subtle symptomatology, his organismic biology, and his philosophical reflections. In his concept of abstract attitude Goldstein searched for a general psychological function that might explain a variety of aphasic symptoms. Abstract attitude bridges the gap between cognitive and linguistic structures. According to Goldstein, it is the basis for words to have a meaning, to be employed in a categorical sense. Since amnesic aphasics are confined to a concrete attitude, their words have lost their representational function. Although Goldstein‘s concept of abstract attitude is no longer used in scientific discourse, it is analyzed for its heuristic value. 
It led Goldstein to questions about the relation between cognition and language and to fragments of a semantic theory.}, web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6WBY-45FCDHK-4-1\&_cdi=6723\&_user=29041\&_orig=browse\&_coverDate=12\%2F31\%2F2000\&_sk=999559996\&view=c\&wchp=dGLbVzz-zSkzV\&md5=0a37c81e2e467bd099fea7acfb25d3b3\&ie=/sdarticle.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1006/brcg.1999.1199}, author = {Noppeney, U and Wallesch, CW} } @Article { 3705, title = {Differential effects of pain and spatial attention on digit representation in the human primary somatosensory cortex}, journal = {Neuroreport}, year = {2000}, month = {4}, volume = {11}, number = {6}, pages = {1289-1293}, abstract = {Reorganization of primary somatosensory cortex subsequent to either reduced or enhanced peripheral input is well established. Recently, plastic changes following arm amputation in humans were shown to correlate with phantom limb pain. This raised the question whether spatial attention and pain may cause cortical reorganization in the absence of deafferentation. Using non-invasive neuroelectric imaging to study the digit representation in the human primary somatosensory cortex, we report a delayed shift of the representation of digits 2-3 due to pain on the digits 4-5, which outlasted the pain by several minutes. In contrast, reorganization during spatial attention was less pronounced, was seen almost immediately and only during the condition. These data indicate that spatial attention and pain without peripheral deafferentation cause cortical reorganization by different mechanisms. The differential time course of reorganizational effects observed at the cortex may be due to modulation of the lemniscal pathways by nociceptive input from the spinal cord dorsal horn.}, web_url = {http://gateway.ovid.com/ovidweb.cgi?T=JS\&NEWS=N\&PAGE=fulltext\&AN=00001756-200004270-00029\&LSLINK=80\&D=ovft}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, author = {Buchner, H and Richrath, P and Gr{\"u}nholz, J and Noppeney, U and Waberski, TD and Gobbel{\'e}, R and Willmes, K and Treede, RD} } @Article { 3694, title = {Spatial attention modulates the cortical somatosensory representation of the digits in humans}, journal = {Neuroreport}, year = {1999}, month = {10}, volume = {10}, number = {15}, pages = {3137-3141}, abstract = {The topographic organization of the primary somatosensory cortex adapts to alterations of afferent input. Here, electric source imaging was used to show that spatial attention modifies cortical somatosensory representations in humans. The cortical representation of the electrically stimulated digit 2 (resp. digits 2 and 3) of the right hand was more medial along the somatosensory area 3b in subjects who focused attention on digit 4 of the right hand, while it was more lateral when subjects attended digit 4 of the contralateral hand. This effect was very fast since the direction of attention was changed every 6 min.
The results indicate that cortical somatosensory representations not only depend on afferent input but vary when spatial attention is directed towards different parts of the body.}, web_url = {http://ovidsp.tx.ovid.com/sp-2.3.1b/ovidweb.cgi?\&S=KMMMFPBKMIDDENDPNCDLGGMCMFEDAA00\&Abstract=S.sh.15.17|3|1}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, author = {Noppeney, U and Waberski, TD and Gobbel{\'e}, R and Buchner, H} } @Article { 3699, title = {Bilaterale Hypoglossusparese als isolierte neurologische Symptomatik nach Sch{\"a}del-Hirn-Trauma}, journal = {Nervenarzt}, year = {1999}, month = {4}, volume = {70}, number = {4}, pages = {357-358}, abstract = {We report a 72-year-old patient who developed an isolated bilateral hypoglossal nerve paralysis following head trauma with complete recovery after three months. Since the CT scan did not show any fractures of the posterior skull base, we discuss a traction nerve injury as a possible mechanism.}, web_url = {http://www.springerlink.com/content/89q4p8awxm885qte/fulltext.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {de}, DOI = {10.1007/s001150050448}, author = {Noppeney, U and Nacimiento, W} } @Article { 3706, title = {Sustained attention modulates the immediate effect of de-afferentation on the cortical representation of the digits: source localization of somatosensory evoked potentials in humans.}, journal = {Neuroscience Letters}, year = {1999}, month = {1}, volume = {260}, number = {1}, pages = {57-60}, abstract = {Long-term cortical reorganization of the somatotopic arrangement of the digits after alterations of the peripheral input is well established. Studies on the immediate effects of manipulating peripheral input have shown conflicting results indicating that additional factors might modulate cortical reorganization. We present a source localization study using somatosensory evoked potentials (SEP) following electric stimulation of digits one and five before and during anaesthesia of digits two, three and four in 10 normal volunteers. When attention was directed to a stimulus at the dorsal hand, the 3D-distance between digits one and five decreased during as compared to before anaesthesia. In contrast, this distance enlarged when subjects were not attending a particular stimulus. In this condition most subjects focused their attention on the clear sensation of the de-afferented hand region. These results indicate that attention modulates the effect of immediate cortical reorganization of the hand area during partial deafferentation. As an hypothesis: it may be speculated that the sensation of the de-afferentation results in increased synchronized activity of the de-afferented somatosensory cortex and, thus, to its enlarged representation. 
Conversely, if attention is directed to a different hand region, the representations of the neighboring digits may expand into the de-afferented cortex.}, web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6T0G-3VKCVT0-H-3\&_cdi=4862\&_user=29041\&_orig=browse\&_coverDate=01\%2F22\%2F1999\&_sk=997399998\&view=c\&wchp=dGLzVzz-zSkWW\&md5=aa5c8d5c5b71560e852af6de8708b4e7\&ie=/sdarticle.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, DOI = {10.1016/S0304-3940(98)00948-3}, author = {Buchner, H and Reinartz, U and Waberski, TD and Gobbel{\'e}, R and Noppeney, U and Scherg, M} } @Inproceedings { EndresAGN2012, title = {Understanding the Semantic Structure of Human fMRI Brain Recordings with Formal Concept Analysis}, year = {2012}, month = {5}, pages = {96-111}, abstract = {We investigate whether semantic information related to object categories can be obtained from human fMRI BOLD responses with Formal Concept Analysis (FCA). While the BOLD response provides only an indirect measure of neural activity on a relatively coarse spatio-temporal scale, it has the advantage that it can be recorded from humans, who can be questioned about their perceptions during the experiment, thereby obviating the need of interpreting animal behavioral responses. Furthermore, the BOLD signal can be recorded from the whole brain simultaneously. In our experiment, a single human subject was scanned while viewing 72 gray-scale pictures of animate and inanimate objects in a target detection task. These pictures comprise the formal objects for FCA. We computed formal attributes by learning a hierarchical Bayesian classifier, which maps BOLD responses onto binary features, and these features onto object labels. The connectivity matrix between the binary features and the object labels can then serve as the formal context. In line with previous reports, FCA revealed a clear dissociation between animate and inanimate objects with the inanimate category also including plants. Furthermore, we found that the inanimate category was subdivided between plants and non-plants when we increased the number of attributes extracted from the BOLD response. FCA also allows for the display of organizational differences between high-level and low-level visual processing areas. We show that subjective familiarity and similarity ratings are strongly correlated with the attribute structure computed from the BOLD signal.}, url = {http://www.kyb.tuebingen.mpg.de/fileadmin/user_upload/files/publications/2012/ICFCA-2012-Adam.pdf}, department = {Research Group Noppeney}, web_url = {http://www.econ.kuleuven.be/ICFCA/}, editor = {Domenach, F. , D.I. Ignatov, J. Poelmans}, publisher = {Springer}, address = {Berlin, Germany}, series = {Lecture Notes in Computer Science ; 7278}, booktitle = {Formal Concept Analysis}, event_place = {Leuven, Belgium}, event_name = {10th International Conference on Formal Concept Analysis (ICFCA 2012)}, ISBN = {978-3-642-29891-2}, DOI = {10.1007/978-3-642-29892-9_13}, author = {Endres, A and Adam, R and Giese, MA and Noppeney, U} } @Inproceedings { HuszarNL2010, title = {Mind Reading by Machine Learning: A doubly Bayesian Method for Inferring Mental Representations}, year = {2010}, month = {8}, pages = {2810-2815}, abstract = {A central challenge in cognitive science is to measure and quantify the mental representations humans develop – in other words, to `read' subjects' minds.
In order to eliminate potential biases in reporting mental contents due to verbal elaboration, subjects' responses in experiments are often limited to binary decisions or discrete choices that do not require conscious reflection upon their mental contents. However, it is unclear what such impoverished data can tell us about the potential richness and dynamics of subjects' mental representations. To address this problem, we used ideal observer models that formalise choice behaviour as (quasi-)Bayes-optimal, given subjects' representations in long-term memory, acquired through prior learning, and the stimuli currently available to them. Bayesian inversion of such ideal observer models allowed us to infer subjects' mental representation from their choice behaviour in a variety of psychophysical tasks. The inferred mental representations also allowed us to predict future choices of subjects with reasonable accuracy, even in tasks that were different from those in which the representations were estimated. These results demonstrate a significant potential in standard binary decision tasks to recover detailed information about subjects' mental representations.}, url = {http://www.kyb.tuebingen.mpg.de/fileadmin/user_upload/files/publications/2011/COGSCI-2010-Huszar.pdf}, department = {Research Group Noppeney}, web_url = {http://cognitivesciencesociety.org/wp-content/uploads/archival/cognitivesciencesociety.org/conference2010/schedule.html}, editor = {Ohlsson, S. , R. Catrambone}, publisher = {Cognitive Science Society}, address = {Austin, TX, USA}, booktitle = {Cognition in Flux}, event_place = {Portland, OR, USA}, event_name = {32nd Annual Conference of the Cognitive Science Society (CogSci 2010)}, ISBN = {978-0-9768318-6-0}, author = {Huszar, F and Noppeney, U and Lengyel, M} } @Inproceedings { 3709, title = {Functional imaging in reading epilepsy}, year = {2004}, pages = {71-78}, web_url = {http://www.jle.com/en/ouvrages/e-docs/reflex_epilepsies_progress_in_understanding_263989/ouvrage.phtml}, editor = {Wolf, P. , Y. Inoue, B. G. Zifkin}, publisher = {John Libbey Eurotext}, address = {Montrouge, France}, series = {Current problems in epilepsy ; 19}, booktitle = {Reflex Epilepsies: Progress in Understanding}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {Bielefeld, Germany}, event_name = {12th International Bethel-Cleveland Epilepsy Symposium 2001}, language = {en}, ISBN = {2-7420-0540-4}, author = {Koepp, MJ and Noppeney, U and Salek-Haddadi, A and Price, CJ} } @Inbook { Noppeney2011, title = {Kurt Goldstein und Frederik Buytendijk: Der Leib-Begriff in der organismischen Biologie}, year = {2012}, month = {3}, pages = {194-206}, department = {Research Group Noppeney}, web_url = {http://www.utb-shop.de/details.php?p_id=532397}, editor = {Alloa, E. , T. Bedorf, C. Gr{\"u}ny, T. N. Klass}, publisher = {Mohr Siebeck}, address = {T{\"u}bingen, Germany}, booktitle = {Leiblichkeit: Geschichte und Aktualit{\"a}t eines Konzepts}, ISBN = {978-3-8252-3633-5}, author = {Noppeney, U} } @Inbook { 6652, title = {Characterization of Multisensory Integration with fMRI: Experimental Design, Statistical Analysis, and Interpretation}, year = {2012}, month = {1}, pages = {233-252}, abstract = {This chapter reviews the potential and limitations of functional magnetic resonance imaging (fMRI) in characterizing the neural processes underlying multisensory integration. The neural basis of multisensory integration can be characterized from two distinct perspectives.
From the perspective of functional specialization, we aim to identify regions where information from different senses converges and/or is integrated. From the perspective of functional integration, we investigate how information from multiple sensory regions is integrated via interactions among brain regions. Combining these two perspectives, this chapter discusses experimental design, analysis approaches, and interpretational limitations of fMRI results. The first section describes univariate statistical analyses of fMRI data and emphasizes the interpretational ambiguities of various statistical criteria that are commonly used for the identification of multisensory integration sites. The second section explores the potential and limitations of multivariate and pattern classifier approaches in multisensory integration. The third section introduces effective connectivity analyses that investigate how multisensory integration emerges from distinct interactions among brain regions. The complementary strengths of data-driven and hypothesis-driven effective connectivity analyses will be discussed. We conclude by emphasizing that the combined potentials of these various analysis approaches may help us to overcome or at least ameliorate the interpretational ambiguities associated with each analysis when applied in isolation.}, department = {Research Group Noppeney}, web_url = {http://www.crcnetbase.com/doi/abs/10.1201/b11092-17}, editor = {Murray, M. M. , M. T. Wallace}, publisher = {CRC Press}, address = {Boca Raton, FL, USA}, series = {Frontiers in Neuroscience}, booktitle = {The neural bases of multisensory processes}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, ISBN = {978-1-439-81217-4}, DOI = {10.1201/b11092-17}, author = {Noppeney, U} } @Inbook { 5229, title = {The sensory-motor theory of semantics: Evidence from functional imaging}, year = {2009}, month = {1}, pages = {177-210}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {https://www.peterlang.com/view/product/62399}, editor = {Fuchs, S. , H. Loevenbruck, D. Pape, P. Perrier}, publisher = {Lang}, address = {Frankfurt (Main), Germany}, booktitle = {Some Aspects of Speech and the Brain}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, ISBN = {978-3-631-57630-4}, author = {Noppeney, U} } @Inbook { 3838, title = {The neural systems processing tool and action semantics}, year = {2007}, pages = {182-204}, abstract = {This chapter discusses the contributions of functional imaging to our understanding of how action and tool concepts are represented and processed in the human brain. Section 7.1. introduces cognitive models of semantic organization. Section 7.2. provides a brief overview of functional imaging approaches to identify brain regions that have specialized for processing action and tool representations. Section 7.3. discusses the relationship between the visuomotor system and semantic processing of actions. Section 7.4. investigates the effects of action type and visual experience on action-selective responses. Section 7.5. characterizes the neural systems engaged in tool processing and how they are modulated by task and stimulus modality. Section 7.6. 
delineates future directions that may enable us to characterize the neural mechanisms that mediate tool and action-selective brain responses.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://www.cambridge.org/gb/knowledge/isbn/item1172606/?site_locale=en_GB}, editor = {Hart, J. , M. A. Kraut}, publisher = {Cambridge University Press}, address = {Cambridge, UK}, booktitle = {The neural basis of semantic memory}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, ISBN = {0-521-84870-9}, author = {Noppeney, U} } @Inbook { 3710, title = {Functional neuroimaging of neuropsychologically impaired patients}, year = {2006}, month = {5}, pages = {455-480}, department = {Research Group Noppeney}, web_url = {http://cognet.mit.edu/library/erefs/cabeza/}, editor = {Cabeza, R. , A. Kingstone}, publisher = {MIT Press}, address = {Cambridge, MA, USA}, booktitle = {Handbook of Functional Neuroimaging of Cognition}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, author = {Price, CJ and Noppeney, U and Friston, KJ} } @Inbook { 3707, title = {The Feature-Based Model of Semantic Memory}, year = {2004}, pages = {533-545}, abstract = {This chapter focuses on functional imaging studies that investigate the feature-based model of semantic organization and tests for anatomical segregation underlying different types of semantic features. It discusses the contributions of functional imaging to our understanding of how conceptual knowledge is represented in the human brain. The first section outlines the feature-based account of semantic organization. The feature-based account can explain category-specific semantic deficits for living and nonliving items without assuming category-specificity as an underlying organizational principle of semantic memory. The feature-based model is thought to be implemented in the human brain in terms of input and output channels. This chapter also describes the potential and pitfalls of functional imaging as a means to investigate the organizational principles of semantic memory. Furthermore, it reviews functional imaging evidence for a role of a left posterior middle temporal area in action semantics and a left fusiform area in visual semantics. Finally, it discusses the feature-based account and concludes that specialization of brain regions for different types of semantic knowledge can only be understood within particular task contexts.}, web_url = {http://www.sciencedirect.com/science/article/pii/B9780122648410500299}, editor = {Frackowiak, R.S.J. , K.J. Friston, C.D. Frith, R.J. Dolan, C.J. Price, S. Zeki, J.T. Ashburner, W.D. Penny}, publisher = {Elsevier}, address = {Amsterdam, Netherlands}, booktitle = {Human Brain Function}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, language = {en}, ISBN = {978-0-12-264841-0}, DOI = {10.1016/B978-012264841-0/50029-9}, author = {Noppeney, U} } @Poster { DelongGARCWN2017, title = {The invisible ventriloquist: can unaware flashes alter sound perception?}, journal = {Brain and Neuroscience Advances}, year = {2017}, month = {4}, day = {10}, volume = {1}, number = {BNA 2017 Festival of Neuroscience: Abstract Book}, pages = {27}, abstract = {Information integration across the senses is fundamental for effective interactions with our environment. A controversial question is whether signals from different senses can interact in the absence of awareness. 
Models of global workspace would predict that unaware signals are confined to processing in low level sensory areas and thereby prevented from interacting with signals from other senses in higher order association areas. Yet, accumulating evidence suggests that multisensory interactions can emerge – at least to some extent – already at the primary cortical level [1]. These low level interactions may thus potentially mediate interactions between sensory signals in the absence of awareness. Combining the spatial ventriloquist illusion and dynamic continuous flash suppression (dCFS) [2], we investigated whether visual signals that observers did not consciously perceive can influence spatial perception of sounds. Importantly, dCFS obliterated visual awareness only on a fraction of trials, allowing us to compare spatial ventriloquism for physically identical flashes that were judged visible or invisible. Our results show a stronger ventriloquist effect for visible than invisible flashes. Yet, a robust ventriloquist effect also emerged for flashes judged invisible. This ventriloquist effect for invisible flashes was even preserved in participants who were not better than chance when locating flashes they judged ‘invisible’. Collectively, our findings demonstrate that physically identical visual signals influence the perceived location of concurrent sounds depending on their subjective visibility. Even visual signals that participants are not aware of can alter sound perception. These results suggest that audiovisual signals are integrated into spatial representations to some extent in the absence of perceptual awareness.}, department = {Department B{\"u}lthoff}, department2 = {Department Logothetis}, web_url = {http://journals.sagepub.com/doi/pdf/10.1177/2398212817705279}, event_place = {Birmingham, UK}, event_name = {BNA 2017 Festival of Neuroscience (British Neuroscience Association)}, author = {Delong, P and Giani, A and Aller, M and Rohe, T and Conrad, V and Watanabe, M and Noppeney, U} } @Poster { GauBTTN2016, title = {Layer-specific attentional modulation and multisensory interactions in sensory cortices}, year = {2016}, month = {6}, day = {28}, number = {2301}, abstract = {Introduction: Accumulating evidence suggests that audiovisual interactions are not deferred to association areas, but start already at the primary cortical level. Moreover, visual stimuli induce fMRI deactivations in primary auditory areas as well as activations in the visual system, and vice versa for auditory stimuli. It is unknown whether these crossmodal deactivations are caused by top-down effects from association areas reflecting the withdrawal of attentional resources from the non-stimulated modality, or via direct connections between sensory areas. Previous neurophysiological studies in animals have demonstrated that visual and auditory stimuli induce activations in auditory cortices with distinct layer-dependent profiles (feedforward/granular vs. backward/infra+supragranular). This study used 7T fMRI to resolve BOLD responses at different cortical depths and characterize the effects of sensory stimulation and modality-specific attention on the cortical layer-specific activation profiles. Methods: Thirteen participants took part in this fMRI experiment at 7T. In a 3 (stimulation modality) X 2 (modality-specific attention) design, they were presented with 30-second blocks of visual (concentric looming white circles on black background), auditory (looming frequency-modulated pure tones) or audio-visual stimuli.
They attended and responded to either visual or auditory targets. For anatomical reference, we acquired a whole-brain T1 map with a MP2RAGE sequence (spatial resolution: (0.7 mm)3; TR=5000 ms; TE=2.45 ms; TI1/2=900 ms/2750 ms; FA1/2=5\(^{\circ}\)/3\(^{\circ}\); iPAT=2). During the functional experiments, we acquired 46 axial EPI slices with an axial coverage of 3.6 cm (spatial resolution: (0.75 mm)3; TR=3000 ms; TE=25 ms; FA=90; iPAT=4). For cortical depth-dependent analysis, the cortex was segmented, upsampled to a (0.4 mm)3 resolution and automatically contoured into 6 laminae using the CBS Tools and MIPAV (Waehnert 2013). Using SPM, the functional data were realigned, unwarped, coregistered to the up-sampled whole-brain T1 map and regridded to (0.4 mm)3 resolution. The first level subject-specific GLM analysis independently modelled the 3 blocks of the 6 conditions and the target events (i.e. 3*6+1=19 regressors). The parameter estimates for 6 conditions x 3 blocks x 4 sessions were averaged within 6 layers for each ROI. For each condition, the 6 layer-specific parameter estimates x 3 blocks x 4 sessions were entered into a second level subject-specific GLM that modelled the cortical profile across the 6 layers by a constant, linear and quadratic function across layers. The parameter estimates for the constant, linear and quadratic were entered in independent one sample t-tests at the third random effects level. Results: Behavioural results: 80\% accuracy for target detection, indicating that participants maintained modality-specific attention. fMRI results: Auditory stimulation induced sustained deactivations in visual cortices that were most pronounced at superficial layers (i.e. significant linear effect in V1-5). No significant cross-modal deactivations were observed in A1. Super-additive audio-visual interactions (i.e. AV\(\neq\)A+V) were maximal at the cortical layer nearest the surface, in visual areas V1-V5 (i.e. significant linear effect in V1-5). Visual attention, when compared with auditory attention, reduced activations in V1-2 similarly across all layers, and increased activations in visual areas V3-5, predominantly in the more superficial cortical layers (i.e. significant linear effect in V4v). Conclusions: Our results demonstrate that sensory BOLD activations and cross-modal deactivations are largest near the cortical surface. Likewise, audio-visual interactions are observed predominantly in superficial layers. Surprisingly, increased visual attention reduced visual-evoked BOLD activation in V1 and V2, irrespective of cortical depth, but produced increased activation in V4v, predominantly at the cortical surface. This may indicate distinct attentional mechanisms.}, department = {Research Group Noppeney}, web_url = {https://ww5.aievolution.com/hbm1601/index.cfm?do=abs.viewAbs\&abs=3025}, event_place = {Geneva, Switzerland}, event_name = {22nd Annual Meeting of the Organization for Human Brain Mapping (OHBM 2016)}, author = {Gau, R and Bazin, P-L and Trampel, R and Turner, R and Noppeney, U} } @Poster { BeierholmRSN2016, title = {Using the past to estimate sensory uncertainty}, year = {2016}, month = {2}, number = {I-1}, pages = {50}, abstract = {Combining multiple sources of information requires an estimate of the reliability of each source in order to perform optimal information integration.
The human brain is faced with this challenge whenever processing multisensory stimuli, however how the brain estimates the reliability of each source is unclear with most studies assuming that the reliability is directly available. In practice however reliability of an information source requires inference too, and may depend on both current and previous information, a problem that can neatly be placed in a Bayesian framework. We performed three audio-visual spatial localization experiments where we manipulated the uncertainty of the visual stimulus over time. Subjects were presented with simultaneous auditory and visual cues in the horizontal plane and were tasked with locating the auditory cue. Due to the well-known ventriloquist illusion responses were biased towards the visual cue, depending on its reliability. We found that subjects changed their estimate of the visual reliability not only based on the presented visual stimulus, but were also influenced by the history of visual stimuli. The finding implies that the estimated reliability is governed by a learning process, here operating on a timescale on the order of 10 seconds. Using model comparison we found for all three experiments that a hierarchical Bayesian model that assumes a slowly varying reliability is best able to explain the data. Together these results indicate that the subjects’ estimated reliability of stimuli changes dynamically and thus that the brain utilizes the temporal dynamics of the environment by combining current and past estimates of reliability.}, department = {Department B{\"u}lthoff}, web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_16}, event_place = {Salt Lake City, UT, USA}, event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2016)}, author = {Beierholm, U and Rohe, T and Stegle, O and Noppeney, U} } @Poster { RoheN2015_3, title = {Task-dependent reliability-weighted integration of audiovisual spatial signals in parietal cortex}, year = {2015}, month = {6}, day = {17}, volume = {21}, number = {4244}, abstract = {Introduction: To form a reliable percept of the multisensory environment, the brain integrates signals across the senses. To estimate for example an object's location from vision and audition, the optimal strategy is to integrate the object's audiovisual signals proportional to their reliability under the assumption that they were caused by a single source (i.e., maximum likelihood estimation, MLE). Behaviorally, it is well-established that humans integrate signals weighted by their reliability in a near-optimal fashion when integrating visual-haptic (Ernst and Banks, 2002) and audiovisual signals (Alais and Burr, 2004). Recently, elegant neurophysiological studies in macaques have shown that single neurons and neuronal populations implement reliability-weighted integration of visual-vestibular signals (Fetsch, et al., 2012; Morgan, et al., 2008). Yet, it is unclear how the human brain accomplishes this feat. Combining psychophysics and multivariate fMRI decoding in a spatial ventriloquist paradigm, we characterized the computational operations underlying audiovisual reliability-weighted integration at several cortical levels along the auditory and visual processing hierarchy. Methods: In a spatial ventriloquist paradigm, participants (N = 5) were presented with auditory and visual signals that were independently sampled from four locations along the azimuth (Fig. 1). The signals were presented alone in unisensory conditions or jointly in bisensory conditions. 
The spatial reliability of the visual signal was high or low. Participants localized either the auditory or the visual spatial signal. The behavioral signal weights were estimated by fitting psychometric functions to participants' localization responses in bisensory conditions without (0\(^{\circ}\), i.e. congruent) or with a small spatial discrepancy (± 6\(^{\circ}\)). These empirical weights were compared to weights which were predicted according to the MLE model from the signals' sensory reliabilities estimated in unisensory conditions. Similarly, neural signal weights were estimated by fitting 'neurometric' functions to the spatial locations decoded from regional fMRI activation patterns in bisensory conditions and compared to weight predictions from unisensory conditions. For decoding signal locations, a support vector machine was trained on activation patterns from congruent conditions and then generalized to data from discrepant conditions as well as unisensory conditions. Conclusions: In summary, the results demonstrate that higher-order multisensory regions perform probabilistic computations such as reliability-weighting. However, despite the small signal discrepancy, the signals were not mandatorily integrated as predicted by the MLE model because task-relevant signals attained larger weights. Thus, probabilistic multisensory computations might involve more complex processes than mandatory reliability-weighted integration, such as inferring whether the signals were caused by a common source or by independent sources (i.e., causal inference). Only under conditions in which the assumption of a common source is fostered (e.g., by presenting only correlated signals with a small discrepancy), multisensory signals might be fully integrated, weighted by their reliability.}, department = {Research Group Noppeney}, department2 = {Department B{\"u}lthoff}, web_url = {http://www.humanbrainmapping.org/i4a/pages/index.cfm?pageID=3625}, event_place = {Honolulu, HI, USA}, event_name = {21st Annual Meeting of the Organization for Human Brain Mapping (OHBM 2015)}, author = {Rohe, T and Noppeney, U} } @Poster { GauTBTN2014, title = {Effect of sensory modality and attention on layer-specific activations in sensory cortices}, year = {2014}, month = {6}, day = {12}, volume = {20}, number = {4067}, abstract = {Introduction: Accumulating evidence suggests that audiovisual interactions are not deferred to higher order association areas, but start already at the primary cortical level. Moreover, visual stimuli have been shown to induce fMRI activations in the visual system, with deactivations in primary auditory areas, and vice versa for auditory stimuli. It is unknown whether these crossmodal deactivations are caused by top-down effects from association areas reflecting the withdrawal of attentional resources from the non-stimulated modality, or via direct connections between sensory areas. Previous neurophysiological studies in animals have demonstrated that visual and auditory stimuli induce activations in auditory cortices with distinct layer-dependent profiles (feedforward/granular vs. backward/infra+supragranular). This study used 7T fMRI to resolve BOLD responses at different cortical depths and characterize the effects of sensory stimulation and modality-specific attention on the cortical layer-specific activation profiles. Methods: Four participants took part in this fMRI experiment at 7T (MAGNETOM 7T).
In a 3 (stimulation modality) X 2 (attention) design, they were presented with visual (V, concentric looming white circles on a black background), auditory (A, looming frequency-modulated pure tones) or audio-visual stimuli (AV). They attended and responded to targets either in the visual or auditory modality. Using a 24 channel phased array head coil (Nova Medical Inc, Wilmington MA, USA), we acquired a whole-brain T1 map (MP2RAGE (Marques et al. 2010); spatial resolution: (0.7 mm)3; TR=5000 ms; TE=2.45 ms; TI1/2=900 ms/2750 ms; FA1/2 = 5\(^{\circ}\)/3\(^{\circ}\); iPAT=2) and 46 axial EPI slices with an axial coverage of 3.6 cm that included the entire primary visual and auditory cortices and much of the posterior superior temporal gyri (spatial resolution: (0.75 mm)3; TR=3000 ms; TE=25 ms; FA=90; iPAT=4). Primary and secondary visual cortices were identified by retinotopic mapping. Four regions of interest were defined including left and right Heschl's gyri, the planum temporale, the left/right superior temporal gyri and the left/right primary auditory cortices (i.e. TE 1.0 based on cytoarchitectonic probability maps). For cortical depth-dependent analysis, the cortex was segmented, upsampled to a (0.4 mm)3 resolution and automatically contoured into 6 laminae using the CBS Tools and MIPAV (Waehnert et al. 2013). Using SPM, the functional data were realigned, unwarped, coregistered to the up-sampled whole-brain T1 map and regridded to (0.4 mm)3 resolution. The subject-specific GLM analysis included 7 regressors, modeling the 6 conditions of the 3 X 2 design, target onsets and subject responses with a canonical HRF and its temporal derivative. Contrast images were computed for each of the 3 X 2 conditions. Parameter estimates for each contrast were averaged for each layer of each ROI and then across subjects. Results: Overall mean accuracy was above 90\%, assuring participant compliance with the attention modulation instructions. Preliminary fMRI analyses revealed: 1. Cross-modal deactivations: auditory stimuli induced deactivations in primary visual areas, as did visual stimuli in primary auditory areas. 2. Attention amplified the sensory evoked activations and deactivations in primary sensory areas. 3. Audiovisual interactions (i.e. AV \(\neq\) A + V) were sub-additive. 4. The cortical profile showed an activation gradient, maximal at the cortical surface. Conclusions: The study replicated cross-modal deactivations operating from vision to audition and vice versa. Moreover, we demonstrate that these cross-modal deactivations are amplified when attention is directed to the sensory input. Further analyses will be needed to delineate how much attention and sensory modality modulate the layer-specific activation profiles.}, department = {Research Group Noppeney}, web_url = {https://www.humanbrainmapping.org/i4a/pages/index.cfm?pageID=3565}, event_place = {Hamburg, Germany}, event_name = {20th Annual Meeting of the Organization for Human Brain Mapping (OHBM 2014)}, author = {Gau, R and Trampel, R and Bazin, P-L and Turner, R and Noppeney, U} } @Poster { RoheN2014, title = {A cortical hierarchy performs Bayesian Causal Inference for multisensory perception}, year = {2014}, month = {6}, day = {11}, volume = {20}, number = {4050}, abstract = {Introduction: To form a reliable percept of the multisensory environment, the brain integrates signals across the senses. 
However, it should integrate signals only when caused by a common source, but segregate those from different sources (Shams and Beierholm, 2010). Bayesian Causal Inference provides a rational strategy to arbitrate between information integration and segregation: In the case of a common source, signals should be integrated weighted by their sensory reliability (Ernst and Banks, 2002; Alais and Burr, 2004; Fetsch et al., 2012). In the case of separate sources, they should be processed independently. Yet, in everyday life, the brain does not know whether signals come from common or different sources, but needs to infer the probabilities of these causal structures from the sensory signals. A final estimate can then be obtained by averaging the estimates under the two causal structures weighted by their posterior probabilities (i.e. model averaging). Indeed, human observers locate audiovisual signal sources by combining the spatial estimates under the assumptions of common and separate sources weighted by their probabilities (Kording et al., 2007). Yet, the neural basis of Bayesian Causal Inference during spatial localization remains unknown. This study combines Bayesian Modeling and multivariate fMRI decoding to characterize how Bayesian Causal Inference is performed by the auditory and visual cortical hierarchies (Fig. 1A-C). Methods: Participants (N = 5) were presented with auditory and visual signals that were independently sampled from four locations along the azimuth. The spatial reliability of the visual signal was high or low. In a selective attention paradigm, participants localized either the auditory or the visual spatial signal. After fitting the Bayesian causal inference model to participants' localization responses, we obtained condition-specific auditory and visual spatial estimates under the assumption of (i) common (\(S_{AV,C=1}\)) and (ii) separate sources (\(S_{A,C=2}\), \(S_{V,C=2}\)) and (iii) the final combined spatial estimate after model averaging (\(S_{A}\), \(S_{V}\)), i.e. five spatial estimates in total (Fig. 1C). Using cross-validation, we trained a support vector regression model to decode these auditory or visual spatial estimates from fMRI voxel response patterns in regions along the visual and auditory cortical hierarchies. We evaluated the decoding accuracy for each spatial estimate in terms of the correlation coefficient between the spatial estimate decoded from fMRI and predicted from the Bayesian Causal Inference model. To determine the spatial estimate that is primarily encoded in a region, we next computed the exceedance probability that a correlation coefficient of one spatial estimate was greater than any of the other spatial estimates (Fig. 1D). Results: Bayesian Causal Inference emerged along the auditory and visual hierarchies: Lower level visual and auditory areas encoded auditory and visual estimates under the assumption of separate sources (i.e. information segregation). Posterior intraparietal sulcus (IPS1-2) represented the reliability-weighted average of the signals under common source assumptions. Anterior IPS (IPS3-4) represented the task-relevant auditory or visual spatial estimate obtained from model averaging. Conclusions: This is the first demonstration that the computational operations underlying Bayesian Causal Inference are performed by the human brain in a hierarchical fashion. Critically, the brain explicitly encodes not only the spatial estimates under the assumption of full segregation (primary visual and auditory areas), but also under forced fusion (IPS1-2).
These spatial estimates under the causal structures of common and separate sources are then averaged into task-relevant auditory or visual estimates according to model averaging (IPS3-4). Our study provides a novel hierarchical perspective on multisensory integration in human neocortex.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {https://www.humanbrainmapping.org/i4a/pages/index.cfm?pageID=3565}, event_place = {Hamburg, Germany}, event_name = {20th Annual Meeting of the Organization for Human Brain Mapping (OHBM 2014)}, author = {Rohe, T and Noppeney, U} } @Poster { LeitaoTTN2014, title = {Using TMS-fMRI to investigate the neural correlates of visual perception}, year = {2014}, month = {6}, day = {11}, volume = {20}, number = {4164}, abstract = {Introduction: Despite sustained attention, weak sensory events often evade our perceptual awareness. The neural mechanisms that determine whether a stimulus is consciously perceived remain poorly understood. Conscious visual perception is thought to rely on a widespread neural system encompassing primary and higher order visual areas, frontoparietal areas and subcortical regions such as the thalamus. This concurrent TMS-fMRI study applied TMS to the right anterior intraparietal sulcus (IPS) and in a sham control to investigate how perturbations to IPS influence the neural systems underlying visual perception of weak sensory events. Methods: 7 subjects took part in the concurrent TMS-fMRI experiment (3T Siemens Magnetom Tim Trio System, GE-EPI, TR = 3290ms, TE = 35ms, 40 axial slices, size = 3mm x 3mm x 3.3mm). The 2x2x2 factorial design manipulated: (i) visual target (present, absent), (ii) visual percept (yes, no) and (iii) TMS condition (IPS, Sham). In a visual target detection task, subjects fixated a cross in the centre of the screen. On 50\% of the trials a weak visual target was presented in their left lower visual field. Subjects were instructed to answer 'yes' only when completely sure. Visual stimuli were individually tailored to yield a detection threshold of 70\% in visual present trials. Bursts of 4 TMS pulses (10Hz) were applied in image acquisition gaps at 100ms after each trial onset over the right IPS (x=42.3, y=-50.3, z=64.4) and during a sham condition using a MagPro X100 stimulator (MagVenture, Denmark) and a MR-compatible figure of eight TMS coil (MRi-B88). Stimulation intensity was 69\% for IPS and was adjusted during Sham stimulation to evoke similar side effects. Trials were presented in blocks of 12 that were interleaved with baseline periods of 13s. Each run consisted of 7 blocks with 4 runs per TMS condition, giving a total of 168 trials per condition. Each TMS condition was performed in different sessions and all conditions were counterbalanced across subjects. Behavioral responses were categorized into hit, miss, false alarm and correct rejection (CR). Performance measures for each category were computed separately for IPS- and Sham-TMS and averaged across subjects. While each condition was modelled at the 1st level (using SPM8), 2nd level random effects analyses (one-sample t-tests) were restricted to target present trials (i.e. hits, misses). We tested for the main effects of TMS, visual percept and their interaction. Results are reported at p<0.05 at cluster level corrected for the whole brain using an auxiliary uncorrected voxel threshold of p=0.01. Conclusions: Visual detection involves perceptual decisions based on uncertain sensory representations.
As participants set a high criterion for determining whether they are aware of targets, missed trials were associated with more uncertainty as indexed by long response times and thereby placed more demands on decisional processes. TMS to IPS perturbed this neural system involved in perceptual decisions and awareness. Critically, while the right precentral/middle frontal gyrus associated with the frontal eye field usually discriminates between hits and misses, TMS-IPS abolishes this difference in activation, indicating that IPS-FEF closely interact in perceptual awareness and decisions.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, department3 = {Department Scheffler}, web_url = {https://www.humanbrainmapping.org/i4a/pages/index.cfm?pageID=3565}, event_place = {Hamburg, Germany}, event_name = {20th Annual Meeting of the Organization for Human Brain Mapping (OHBM 2014)}, author = {Leitao, J and Thielscher, A and Tuennerhoff, J and Noppeney, U} } @Poster { LeitaoTN2013, title = {Influences of right parietal cortex in the visual processing of contralateral visual stimuli in a sustained attentional context}, year = {2013}, month = {11}, day = {12}, volume = {43}, number = {662.17}, abstract = {Visuospatial attention is essential for successful interactions with the environment. It has been shown that visuospatial attention is based on a right lateralized network of parietal and frontal areas. Indeed, insights about this network arise from studies with visual neglect patients, who, after a localized lesion in right parietal or temporal areas, fail to perceive or attend normally to signals in the contralateral left visual hemifield. Accumulating evidence shows that transcranial magnetic stimulation (TMS) over the parietal cortex is able to induce neglect-like changes in performance during visuospatial tasks. At the neural level, parietal TMS has also been shown to modulate activity in remote interconnected areas of the brain, in particular in the occipito-temporal cortex. This study used concurrent TMS-fMRI to investigate the role of the right intraparietal sulcus (IPS) in visual detection under spatial attention. Participants performed a visual target detection task during TMS-stimulation to the right anterior IPS and Sham-TMS-stimulation where specific TMS effects were abolished by placing a 2cm thick plastic plate between the TMS-coil and the participant’s head. In both conditions, TMS was applied in bursts of 4 pulses (10Hz), starting 90ms after the target onset. Participants fixated on a cross in the centre of the screen and attended to a location indicated by a placeholder in the left lower visual field. On each trial, they indicated whether they detected a small visual stimulus that was presented inside the placeholder on 50\% of the trials. Blocks of 12 trials were interleaved with baseline periods of 13s. Hence, attention was sustained specifically to the left visual field throughout the entire block relative to baseline. As expected, compared to baseline, attended periods activated the network of fronto-parietal areas commonly involved in attention. Critically, IPS-TMS relative to Sham-TMS significantly decreased the difference in activations between visual present and visual absent trials by reducing the deactivations during visual present trials in the right anterior fusiform gyrus, an area that has previously been reported to be modulated by attention.
Our results show that the right human parietal cortex influences visual processing in the right ipsilateral occipital cortex by modulating stimulus evoked (de)activations during spatial attention.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, department3 = {Department Scheffler}, web_url = {http://www.sfn.org/annual-meeting/neuroscience-2013}, event_place = {San Diego, CA, USA}, event_name = {43rd Annual Meeting of the Society for Neuroscience (Neuroscience 2013)}, author = {Leit{\~a}o, J and Thielscher, A and Noppeney, U} } @Poster { RemiN2013, title = {The left prefrontal cortex controls information integration by combining bottom-up inputs and top-down predictions}, year = {2013}, month = {11}, day = {12}, volume = {43}, number = {550.06}, abstract = {In the natural environment our senses are bombarded with many different signals. To form a coherent percept, the brain should integrate signals originating from a common source and segregate signals from different sources. This psychophysics-fMRI study investigated how the human brain combines bottom-up inputs (i.e. congruent VS incongruent signals) and top-down prior predictions (i.e. common source prior) to infer whether sensory signals should be integrated or segregated. Sixteen participants were shown audio-visual movies of congruent (e.g. visual «Ti» with auditory /Ti/), incongruent (e.g. visual «Ti» with auditory /Pi/) and McGurk syllables (e.g. visual «Ki» with auditory /Pi/, which can be fused into the illusionary percept “Ti”). Critically, we manipulated participants’ top-down predictions (i.e. common source prior) by presenting the McGurk stimuli in a series of congruent or incongruent syllables. On each trial, participants reported their syllable percept in forced choice procedure with 6 response options. At the behavioural level, participants were more likely to fuse auditory and visual signals of a McGurk trial into an illusionary percept in congruent relative to incongruent contexts. This response profile indicates that participant’s prior top-down predictions (i.e. common source prior) influence whether or not they integrate sensory signals into a coherent percept. At the neural level, incongruent relative to congruent bottom-up inputs increased activations in a widespread left-lateralised fronto-parietal network. The left prefrontal activations also increased for McGurk trials, when participants selectively reported their auditory percept and did not fuse auditory and visual McGurk signals into a unified percept. Critically, this effect was enhanced for incongruent contexts when participants expected that sensory signals are incongruent and needed to be segregated. Collectively, our results demonstrate that the left inferior frontal sulcus determines whether sensory signals should be integrated or segregated by combining (i) top-down predictions generated from prior incongruent trials with (ii) bottom-up information about sensory conflict in the incoming signals. Furthermore, it exerts top-down control that enables participants to process sensory signals independently and selectively report their percept in one sensory (i.e. 
here auditory) modality.}, department = {Research Group Noppeney}, department2 = {Department B{\"u}lthoff}, web_url = {http://www.sfn.org/annual-meeting/neuroscience-2013}, event_place = {San Diego, CA, USA}, event_name = {43rd Annual Meeting of the Society for Neuroscience (Neuroscience 2013)}, author = {Gau, R and Noppeney, U} } @Poster { RoheN2013_2, title = {Intraparietal sulcus forms multisensory spatial priority maps}, year = {2013}, month = {11}, day = {11}, volume = {43}, number = {456.14}, abstract = {To form an accurate percept of the environment the brain integrates sensory signals weighted by their relative reliabilities (Ernst and Banks, 2002). Indeed, recent neurophysiological research has demonstrated that activity of multisensory MSTd neurons during a heading task is modulated by changes in cue reliability in line with predictions of optimal integration (Fetsch et al., 2012). Moreover, top-down influences like task-relevance modulate multisensory perception (Bertelson and Radeau, 1981). The current study investigated how the human brain integrates audiovisual signals into spatial representations depending on their relative reliability and task-relevance. Using fMRI, we characterized how these integration processes emerged along the visual and auditory processing hierarchies. Subjects (N = 5) were presented with synchronous audiovisual signals that were spatially congruent or discrepant at 4 positions along the azimuth. We manipulated visual reliability (low vs. high) and task-relevance (auditory vs. visual-selective localization). Multivariate decoding of spatial information from fMRI data revealed that multisensory influences on spatial representations were present already at the primary cortical level and progressively increased along the cortical hierarchies. Likewise, the influence of task-relevance increased. Most prominently, the intraparietal sulcus integrated audiovisual signals weighted by their relative reliabilities and task-relevance. Further, IPS showed the greatest correlation with participant’s behavioral crossmodal bias. Collectively, the results suggest that IPS forms a spatial priority map (Bisley and Goldberg, 2010) by integrating sensory signals weighted by their bottom-up reliability and top-down task-relevance.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://www.sfn.org/annual-meeting/neuroscience-2013}, event_place = {San Diego, CA, USA}, event_name = {43rd Annual Meeting of the Society for Neuroscience (Neuroscience 2013)}, author = {Rohe, T and Noppeney, U} } @Poster { RoheN2013, title = {Causal inference conditions reliability-weighted integration of audiovisual spatial signals}, year = {2013}, month = {9}, pages = {170}, abstract = {To form coherent and reliable multisensory percepts of the environment, human observers have to segregate multisensory signals caused by independent sources but integrate those from a common source. Models of causal inferences (Kording et al., 2007) predict the inference of a common cause if the signals are close in space and time. Further, models of optimal reliability-weighted integration predict that multisensory signals are weighed proportional to their relative reliability in order to maximize the reliability of the integrated percept (Ernst \& Banks, 2002). To probe models of causal inference and reliability-weighted integration, we presented subjects (N = 26) with audiovisual spatial cues and manipulated spatial disparity and visual reliability. 
Subjects were required to selectively localize the auditory cues and to judge the spatial unity of the cues. Indices of audiovisual spatial integration showed that audiovisual spatial cues were weighted in proportion to visual reliability, but only if a common cause was inferred. Likewise, localization reliability increased with visual reliability in the case of a common-cause inference. Computational models incorporating causal inferences and reliability-weighted integration provided a superior fit to auditory-localization data compared to models implementing only reliability-weighted integration. The results suggest that reliability-weighted integration is conditioned on the outcome of the causal inference.}, department = {Department B{\"u}lthoff}, web_url = {https://portal.g-node.org/abstracts/bc13/\#/doi/nncn.bc2013.0176}, event_place = {T{\"u}bingen, Germany}, event_name = {Bernstein Conference 2013}, DOI = {10.12751/nncn.bc2013.0176}, author = {Rohe, T and Noppeney, U} } @Poster { GauN2013, title = {The left prefrontal cortex controls information integration by combining bottom-up inputs and top-down predictions}, year = {2013}, month = {9}, pages = {169}, abstract = {In the natural environment our senses are bombarded with many different signals. To form a coherent percept, the brain should integrate signals originating from a common source and segregate signals from different sources. This fMRI study investigated how humans combine bottom-up inputs (i.e. congruent VS incongruent signals) and top-down predictions (i.e. common source prior) to infer if sensory signals should be integrated. Sixteen participants were shown movies of congruent (e.g. visual Ti with auditory Ti), incongruent (e.g. visual Ti with auditory Pi) and McGurk syllables (e.g. visual Ki with auditory Pi, which can be fused into the illusionary percept Ti). We manipulated participants’ top-down predictions by presenting the McGurk stimuli in a series of congruent or incongruent syllables. Participants reported their syllable percept in a forced choice procedure with 6 response options. At the behavioural level, participants were more likely to fuse auditory and visual signals of a McGurk trial into an illusionary percept in congruent relative to incongruent contexts. This indicates that participants’ top-down predictions influence whether or not they integrate sensory signals. At the neural level, incongruent relative to congruent bottom-up inputs increased activations in a left fronto-parietal network. The left prefrontal activations also increased for McGurk trials, when participants selectively reported their auditory percept and did not fuse auditory and visual signals. This effect was enhanced for incongruent contexts when participants expected that sensory signals needed to be segregated. Our results show that the left inferior frontal sulcus determines whether sensory signals should be integrated by combining top-down predictions generated from prior trials with bottom-up information about sensory conflict in the incoming signals. 
Furthermore, it exerts top-down control enabling independent sensory processing and report of only one sensory modality.}, department = {Department B{\"u}lthoff}, web_url = {https://portal.g-node.org/abstracts/bc13/\#/doi/nncn.bc2013.0174}, event_place = {T{\"u}bingen, Germany}, event_name = {Bernstein Conference 2013}, DOI = {10.12751/nncn.bc2013.0174}, author = {Gau, R and Noppeney, U} } @Poster { EndresANG2013, title = {Connecting Brain and Mind with Formal Concept Analysis: a Data-Driven Investigation of the Semantic, Explicit Coding Hypothesis}, year = {2013}, month = {3}, day = {13}, pages = {1015-1016}, abstract = {Understanding how semantic information is represented in the brain has been an important research focus of neuroscience in the past few years. Unlike 'traditional' neural (de)coding approaches, which study the relationship between stimulus and neural response, we are interested in higher-order relational coding: we ask how perceived relationships between stimuli (e.g. similarity) are connected to corresponding relationships in the neural activity. Our approach addresses the semantical problem, i.e. how terms (here stimuli) come to have their (possibly subjective) meaning, from the perspective of the network theory of semantics (Churchland 1984). This theory posits that meaning arises from the network of concepts within which a given term is embedded. We showed previously (Endres et al 2010, AMAI) that Formal Concept Analysis (FCA, (Ganter \& Wille 1999)) can reveal interpretable semantic information (e.g. specialization hierarchies, or feature-based representation) from electrophysiological data. Unlike other analysis methods (e.g. hierarchical clustering), FCA does not impose inappropriate structure on the data. FCA is a mathematical formulation of the explicit coding hypothesis (Foldiak, 2009, Curr. Biol.) Here, we investigate whether similar findings can be obtained from fMRI BOLD responses recorded from human subjects. While the BOLD response provides only an indirect measure of neural activity on a much coarser spatio-temporal scale than electrophysiological recordings, it has the advantage that it can be recorded from humans, which can be questioned about their perceptions during the experiment, thereby obviating the need of interpreting animal behavioural responses. Furthermore, the BOLD signal can be recorded from the whole brain simultaneously. In our experiment, a single human subject was scanned while viewing 72 grayscale pictures of animate and inanimate objects in a target detection task (Siemens Trio 3T scanner, GE-EPI, TE=40ms, 38 axial slices, TR=3.08s, 48 sessions, amounting to a total of 10,176 volume images). These pictures comprise the formal objects for FCA. We computed formal attributes by learning a hierarchical Bayesian classifier, which maps BOLD responses onto binary features, and these features onto object labels. The connectivity matrix between the binary features and the object labels can then serve as the formal context. In line with previous reports, FCA revealed a clear dissociation between animate and inanimate objects in a high-level visual area (inferior temporal cortex, IT), with the inanimate category including plants. The inanimate category was subdivided into plants and non-plants when we increased the number of attributes extracted from the fMRI responses. FCA also highlighted organizational differences between the IT and the primary visual cortex, V1. 
We show that subjective familiarity and similarity ratings are strongly correlated with the attribute structure computed from the fMRI signal (Endres et al. 2012, ICFCA).}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {https://www.nwg-goettingen.de/2013/default.asp?id=4}, event_place = {G{\"o}ttingen, Germany}, event_name = {10th G{\"o}ttingen Meeting of the German Neuroscience Society, 34th G{\"o}ttingen Neurobiology Conference}, author = {Endres, DM and Adam, R and Noppeney, U and Giese, MA} } @Poster { RoheN2012_2, title = {Neural audiovisual representations of space in sensory and higher multisensory cortices}, year = {2012}, month = {10}, day = {15}, volume = {42}, number = {463.12}, abstract = {Previous research has demonstrated that human observers locate audiovisual signals in space by averaging auditory (A) and visual (V) spatial signals according to their relative sensory reliabilities (=inverse of variance) (Ernst \& Banks, 2002; Alais \& Burr, 2004). This form of audiovisual integration is optimal in that it provides the most reliable percept.Yet, the neural systems mediating integration of spatial inputs remain unclear. Multisensory integration of spatial signals has previously been related to higher order association areas such as intraparietal sulcus (IPS) as well as the planum temporale (PT; Bonath et al., 2007). In the current fMRI study, we investigated whether and how early sensory (auditory cortex (A1), PT; visual regions V1-V3) and higher association (IPS) areas represent A and V spatial information. Subjects were presented with synchronous audiovisual signals, at spatially congruent or discrepant locations along the azimuth and at two levels of sensory reliability. Hence, the experimental design factorially manipulated: (1) V location, (2) A location, (3) V reliability. Subjects’ task was to localize the A signal. At the behavioral level, the perceived location of the A input was shifted towards the location of the V input depending on the relative A and V reliabilities. Likewise, at the neural level, the spatial location decoded with linear support vector machines from fMRI signals in brain areas along the A and V processing hierarchies was determined by the relative sensory reliabilities. The spatial location decoded from A1/PT was determined primarily by A spatial information with a stronger influence from V spatial information when the V reliability was high. Conversely, the spatial location decoded from visual areas (V1, V2, V3) and IPS was determined primarily by V spatial information with a stronger A influence when the V information was less reliable. 
In conclusion, our results suggest that the brain represents audiovisual spatial location in qualitative agreement with reliability-weighted multisensory integration at multiple levels of the cortical processing hierarchy.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://www.abstractsonline.com/Plan/ViewAbstract.aspx?sKey=34e7b831-d352-4852-a1ed-800cacae4eba\&cKey=ffc383d5-0410-4855-bcbb-88c13e130cff\&mKey=70007181-01c9-4de9-a0a2-eebfa14cd9f1}, event_place = {New Orleans, LA, USA}, event_name = {42nd Annual Meeting of the Society for Neuroscience (Neuroscience 2012)}, author = {Rohe, T and Noppeney, U} } @Poster { EndresANG2012_2, title = {Explicit coding in the brain: data-driven semantic analysis of human fMRI BOLD responses with Formal Concept Analysis}, journal = {Frontiers in Computational Neuroscience}, year = {2012}, month = {9}, day = {14}, volume = {Conference Abstract: Bernstein Conference 2012}, pages = {166}, abstract = {Understanding how semantic information is represented in the brain has been an important research focus of neuroscience in the past few years. We showed previously (Endres et al 2010) that Formal Concept Analysis (FCA, (Ganter and Wille 1999)) can reveal interpretable semantic information (e.g. specialization hierarchies, or feature-based representation) from electrophysiological data. Unlike other analysis methods (e.g. hierarchical clustering), FCA does not impose inappropriate structure on the data. FCA is a mathematical formalism compatible with the explicit coding hypothesis (Foldiak, 2009) Here, we investigate whether similar findings can be obtained from fMRI BOLD responses recorded from human subjects. While the BOLD response provides only an indirect measure of neural activity on a much coarser spatio-temporal scale than electrophysiological recordings, it has the advantage that it can be recorded from humans, which can be questioned about their perceptions during the experiment, thereby obviating the need of interpreting animal behavioural responses. Furthermore, the BOLD signal can be recorded from the whole brain simultaneously. In our experiment, a single human subject was scanned while viewing 72 grayscale pictures of animate and inanimate objects in a target detection task (Siemens Trio 3T scanner, GE-EPI, TE=40ms, 38 axial slices, TR=3.08s, 48 sessions, amounting to a total of 10,176 volume images). These pictures comprise the formal objects for FCA. We computed formal attributes by learning a hierarchical Bayesian classifier, which maps BOLD responses onto binary features, and these features onto object labels. The connectivity matrix between the binary features and the object labels can then serve as the formal context. In line with previous reports, FCA revealed a clear dissociation between animate and inanimate objects in a high-level visual area (inferior temporal cortex, IT), with the inanimate category including plants. The inanimate category was subdivided into plants and non-plants when we increased the number of attributes extracted from the fMRI responses. FCA also highlighted organizational differences between the IT and the primary visual cortex, V1. 
We show that subjective familiarity and similarity ratings are strongly correlated with the attribute structure computed from the fMRI signal.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://www.frontiersin.org/10.3389/conf.fncom.2012.55.00056/event_abstract}, event_place = {M{\"u}nchen, Germany}, event_name = {Bernstein Conference 2012}, DOI = {10.3389/conf.fncom.2012.55.00056}, author = {Endres, D and Adam, R and Noppeney, U and Giese, MA} } @Poster { RoheN2012_3, title = {Intraparietal sulcus represents audiovisual space}, journal = {Frontiers in Computational Neuroscience}, year = {2012}, month = {9}, day = {14}, volume = {Conference Abstract: Bernstein Conference 2012}, pages = {192-193}, abstract = {Previous research has demonstrated that human observers locate audiovisual (AV) signals in space by averaging auditory (A) and visual (V) spatial signals according to their relative sensory reliabilities (=inverse of variance) (Ernst \& Banks, 2002; Alais \& Burr, 2004). This form of AV integration is optimal in that it provides the most reliable percept. Yet, the neural systems mediating integration of spatial inputs remain unclear. Multisensory integration of spatial signals has previously been related to higher order association areas such as intraparietal sulcus (IPS) as well as early sensory areas like the planum temporale (Bonath et al., 2007). In the current fMRI study, we investigated whether and how early visual (V1-V3) and higher association (IPS) areas represent A and V spatial information given their retinotopic organization. One subject was presented with synchronous audiovisual signals, at spatially congruent or discrepant locations along the azimuth and at two levels of sensory reliability. Hence, the experimental design factorially manipulated: (1) V location, (2) A location, (3) V reliability. The subject’s task was to localize the A signal. Retinotopic maps in visual areas and IPS were measured with standard wedge and ring checkerboard stimuli. At the behavioral level, the perceived location of the A input was shifted towards the location of the V input depending on the relative A and V reliabilities. At the neural level, the cue locations represented in retinotopic maps were decoded by computing a population vector estimate (Pouget et al., 2000) from the voxels’ BOLD responses to the AV cues given the voxels’ preferred visual field coordinate. In early visual areas (V1-V3), the decoded cue locations were determined by the V spatial signal but were independent from the A spatial signal. In IPS, the decoded cue locations were determined by the V and the A spatial signals if relative V reliability was low. In conclusion, our results suggest that the brain represents AV spatial location in IPS in qualitative agreement with reliability-weighted multisensory integration.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://www.frontiersin.org/10.3389/conf.fncom.2012.55.00054/event_abstract}, event_place = {M{\"u}nchen, Germany}, event_name = {Bernstein Conference 2012}, DOI = {10.3389/conf.fncom.2012.55.00054}, author = {Rohe, T and Noppeney, U} } @Poster { GianiN2012, title = {Awareness related auditory scene analysis: A processing cascade enables a tone pair to be segregated from background and enter awareness}, year = {2012}, month = {8}, pages = {71-72}, abstract = {In daily life, our auditory system detects and segregates sounds, derived from complex auditory scenes. 
Yet, limited processing capacities allow only a small subset of these sounds to enter awareness. This MEG study used informational masking to investigate the neural mechanisms that enable auditory awareness. On each trial, subjects indicated whether they detected a target that was embedded in a multi-tone background in 67\% of the trials. Targets were defined as a pair of two 40 Hz amplitude-modulated tones, presented sequentially with a fixed SOA of 1050 ms. Hence, target detection required subjects to perceive both tones within a pair (Fig. 1B). We compared MEG activity for hits and misses separately for target tone 1 and 2 both in sensor and source space (Fig. 1A). Successful target detection was associated with changes in transient evoked source activity in bilateral auditory cortices at 3 stages: (1) an enhanced M50 component for tone 1, (2) a negative component at \(\sim\) 150ms for tone 2 and (3) a later, long-latency negativity for both tone 1 and 2 at \(\sim\) 300ms (Fig. 1C). Moreover, subjects’ perceptual sensitivity (d') positively correlated with the magnitude of the M150 component. In addition, we investigated whether steady-state activity was modulated by awareness. Indeed, even though all target tones elicited 40 Hz steady-state responses, the amplitude of 40Hz activity was significantly enhanced when subjects became aware of tone 1 and 2. In conclusion, our results suggest that awareness of a two-tone pair relies on a cascade of processes that segregate this pair from a complex auditory scene. (1) The processing of detected tones is enhanced as indicated by an increased M50 and steady-state response. (2) The sequential integration of the target pair after the 2nd tone then elicits an awareness-related negativity at \(\sim\) 150ms. (3) Finally, aware signals may elicit additional attentional processes, which may be reflected in the enhanced long-latency negativity.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://megfront.meg.chups.jussieu.fr/biomag2012BookofAbstracts.pdf}, event_place = {Paris, France}, event_name = {18th International Conference on Biomagnetism (BIOMAG 2012)}, author = {Giani, AS and Noppeney, U} } @Poster { TsiatsisN2012, title = {Interactions between Transient Auditory and Steady State Visual Stimuli}, year = {2012}, month = {8}, pages = {277-278}, abstract = {Neuronal oscillations are considered crucial for information processing in the brain as they can potentially regulate information flow and dynamically bind different cortical regions. This MEG study investigated the interactions between a transient sound and a steady state visual signal. To induce steady state oscillations in the visual cortex, we presented subjects with a continuous visual signal that was luminance-modulated at 10Hz. The transient sounds were presented locked to four different equidistant phases of the periodic visual stimulus (i.e. 0, \(\pi/2\), \(\pi\), \(3\pi/2\)). More specifically, our experimental design factorially manipulated (i) the presence/absence of the auditory input and (ii) the phase of the visual input, thus providing four types of phase-dependent AV(a) and V(a) trials, where a denotes the specific phase of the visual stimulus. In addition, the design included pure auditory (A) and ’fixation’ trials. This allowed us to dissociate non-specific and specific phase-dependent audiovisual interactions. Non-specific interactions were identified by comparing AV(a) + Fixation vs A + V(a). 
In the frequency domain, this revealed increased activity and phase locking at 10 Hz during the audiovisual conditions. Specific phase-dependent interactions [AV(a) vs. V(a)] were revealed in the time domain at about 300 ms poststimulus, where activity was enhanced mainly for synchronous audiovisual trials (phase 0). Collectively, our results suggest that auditory transients and visual steady signals interact in a non-specific and in a phase-dependent fashion.}, department = {Research Group Noppeney}, web_url = {http://megfront.meg.chups.jussieu.fr/biomag2012BookofAbstracts.pdf}, event_place = {Paris, France}, event_name = {18th International Conference on Biomagnetism (BIOMAG 2012)}, author = {Tsiatsis, P and Noppeney, U} } @Poster { LeeN2012, title = {Leading or Lagging: Temporal prediction errors are expressed in auditory and visual cortices}, year = {2012}, month = {6}, number = {997}, abstract = {Introduction: In our natural environment our brain is exposed to a constant influx of multisensory signals that dynamically evolve at multiple timescales. Statistical regularities are important cues informing the brain whether two sensory signals are generated by a common physical process and should hence be integrated. This fMRI study investigated how the brain detects violations of these statistical regularities induced by the temporal misalignment of the visual and auditory signals. Specifically, we arbitrated between two hypotheses that make opposite predictions: Under the predictive coding framework the brain iteratively optimizes an internal model of its multisensory environment by reducing the error between its predictions and the sensory inputs. An audiovisual misalignment that violates the natural statistical regularities should thus induce a prediction error signal. For visual leading asynchrony, we would expect a prediction error signal in the auditory cortex, because the delayed auditory signal violates the temporal predictions of the 'leading' visual system (vice versa for auditory leading asynchrony) [2,3]. Alternatively, from the perspective of the biased competition model, the misaligned auditory and visual signals compete for processing resources. For visual leading asynchrony, we would expect an increased BOLD-signal in the visual system indexing the higher salience of the leading visual signal which then suppresses the temporally incompatible auditory signal [1]. Methods: 37 subjects participated in this fMRI study (Siemens TimTrio 3T scanner, GE-EPI, TE = 40 ms, 42 axial slices, TR = 3s). They passively perceived audiovisual movies of natural speech, sinewave speech (SWS) and piano music. The audiovisual signals were synchronous, auditory leading (+240ms) or visual leading (-240ms). Hence, the 3 x 3 factorial design manipulated (i) temporal alignment (3 levels) and (ii) stimulus class (3 levels). The activation trials were interleaved with 8s fixation blocks. To allow for random-effects analyses, contrast images (single condition > fixation) for each subject were entered into a 2nd level ANOVA, which modelled the 9 effects in our 3 X 3 design. 1. Using a conjunction-null conjunction analysis, we identified differences between auditory and visual leading conditions that are common to speech, SWS and music. 2. We tested for asynchrony effects (i.e. auditory leading > synchronous, visual leading > synchronous) separately for each stimulus class. Results are reported at p<.05 corrected for multiple comparisons at the cluster level using a height threshold of p<.001 uncorrected. 
Results: 1. Common for all stimulus classes, auditory leading relative to visual leading signals increased activations in bilateral V5/hMT+. In contrast, visual leading relative to auditory leading signals increased activations in bilateral Heschl's gyri (Fig. 1). 2. Auditory leading relative to synchronous AV signals increased activations in the auditory system extending from Heschl's gyrus into posterior superior temporal sulcus/gyrus (STS/STG) bilaterally. Conversely, visual leading relative to synchronous signals increased activations in bilateral occipito-temporal cortices predominantly in V5/hMT+ (Fig. 2).}, url = {http://www.kyb.tuebingen.mpg.defileadmin/user_upload/files/publications/2012/OHBM-2012-Lee.pdf}, department = {Research Group Noppeney}, web_url = {http://www.humanbrainmapping.org/i4a/pages/index.cfm?pageid=3458}, event_place = {Beijing, China}, event_name = {18th Annual Meeting of the Organization for Human Brain Mapping (OHBM 2012)}, author = {Lee, HL and Noppeney, U} } @Poster { AdamEGN2011, title = {Semantic relationships in the tool-selective network revealed by formal concept analysis}, year = {2011}, month = {11}, volume = {41}, number = {486.07}, abstract = {How are objects represented in the human brain? This question has previously been addressed using encoding and decoding approaches to neural activity as indexed by the fMRI BOLD response. Specifically, hierarchical clustering based on similarity matrices of brain activation patterns demonstrated that object representations within the inferior temporal gyrus clustered into animate and inanimate categories in line with previous neuropsychological double dissociations. Here, we used Formal Concept Analysis (FCA) (Ganter and Wille 1999) to characterize how the relationship of BOLD activation patterns maps onto the relationship between object stimuli. FCA displays and interprets the relationship of neural object representations via concept lattices (a type of semantic graph). Each concept is defined by a set of formal objects as extent and a set of formal attributes as intent. In our application, the object stimuli were the formal objects and the binarized activations in single voxels the formal attributes. A single subject was scanned while viewing 72 grayscale pictures of animate and inanimate objects in a target detection task (Siemens Trio 3T scanner, GE-EPI, TE=40ms, 38 axial slices, TR=3.08s, 48 sessions, amounting to a total of 10,176 volume images). We modeled the BOLD responses to the stimulus presentations in an event-related fashion with a general linear model, using a separate regressor for each of the 72 stimuli. From the parameter estimate image for each stimulus, the 300 voxels that were most active for all stimuli > fixation were selected within the category-sensitive system as defined by a prior study (including inferior temporal, supramarginal, inferior frontal gyrus). Formal concept analysis was applied to patterns of thresholded and hence binarized voxel activations. In line with previous reports, formal concept analysis revealed a dissociation between animate and inanimate objects with the inanimate category also including plants and vegetables. 
This study demonstrates the potential strength of FCA for decoding structured relationships in fMRI data.}, department = {Research Group Noppeney}, web_url = {http://www.sfn.org/am2011/}, event_place = {Washington, DC, USA}, event_name = {41st Annual Meeting of the Society for Neuroscience (Neuroscience 2011)}, author = {Adam, R and Endres, A and Giese, MA and Noppeney, U} } @Poster { LeoN2011, title = {Conditioning influences audio-visual integration by increasing sound saliency}, journal = {i-Perception}, year = {2011}, month = {10}, day = {17}, volume = {2}, number = {8}, pages = {762}, abstract = {We investigated the effect of prior conditioning of an auditory stimulus on audiovisual integration in a series of four psychophysical experiments. The experiments factorially manipulated the conditioning procedure (picture vs. monetary conditioning) and multisensory paradigm (2AFC visual detection vs. redundant target paradigm). In the conditioning sessions, subjects were presented with three pure tones (= conditioned stimulus, CS) that were paired with neutral, positive or negative unconditioned stimuli (US, monetary: +50 euro cents, -50 cents, 0 cents; pictures: highly pleasant, unpleasant and neutral IAPS). In a 2AFC visual selective attention paradigm, detection of near-threshold Gabors was improved by concurrent sounds that had previously been paired with a positive (monetary) or negative (picture) outcome relative to neutral sounds. In the redundant target paradigm, sounds previously paired with positive (monetary) or negative (picture) outcomes increased response speed to both auditory and audiovisual targets similarly. Importantly, prior conditioning did not increase the multisensory response facilitation (i.e. (A+V)/2-AV) or the race model violation. Collectively, our results suggest that prior conditioning primarily increases the saliency of the auditory stimulus per se rather than influencing audiovisual integration directly. In turn, conditioned sounds are rendered more potent for increasing response accuracy or speed in detection of visual targets.}, department = {Research Group Noppeney}, web_url = {http://i-perception.perceptionweb.com/journal/I/volume/2/article/ic762}, event_place = {Fukuoka, Japan}, event_name = {12th International Multisensory Research Forum (IMRF 2011)}, DOI = {10.1068/ic762}, author = {Leo, F and Noppeney, U} } @Poster { TsiatsisN2011, title = {Auditory Processing under Steady State Visual Driving}, year = {2011}, month = {10}, volume = {12}, pages = {44}, abstract = {Neuronal oscillations are considered crucial for information processing in the brain as they can potentially regulate information flow and dynamically bind different cortical and non-cortical regions. This MEG study investigated whether the effect of a transient sound was modulated by the phase of oscillations in the visual cortex. To induce steady state oscillations in the visual cortex, we presented subjects with continuous visual signals luminance-modulated at 4Hz or 10Hz. The transient sounds were presented locked to four phases of the periodic visual stimulus (i.e. 0, \(\pi/2\), \(\pi\), \(3\pi/2\)). We then investigated whether the effect of sound depends on the phase of the visual steady state activity by testing for the interaction between sound and visual phase. Conversely, we will investigate the effect of the sound processing on the visual steady state processing given the state of the visual cortex. The results from the two experiments (4Hz \& 10Hz) will be combined and compared. 
Based on recent neurophysiological evidence, we hypothesize that oscillations at different frequencies play distinct functional roles in multisensory integration.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, event_place = {Heiligkreuztal, Germany}, event_name = {12th Conference of Junior Neuroscientists of T{\"u}bingen (NeNA 2011)}, author = {Tsiatsis, P and Noppeney, U} } @Poster { GianiEBKPN2011, title = {Steady-state responses in MEG demonstrate information integration within but not across the auditory and visual senses}, year = {2011}, month = {10}, volume = {12}, pages = {27}, abstract = {To form a unified percept of our environment, the human brain integrates information within and across the senses. This MEG study investigated interactions within and between sensory modalities using a frequency analysis of steady-state responses (SSR) to periodic auditory and/or visual inputs. The 3x3 factorial design manipulated (1) modality (auditory only, visual only and audiovisual) and (2) temporal dynamics (static, dynamic1 and dynamic2). In the static conditions, subjects were presented with (1) visual gratings, luminance modulated at 6Hz and/or (2) pure tones, frequency modulated at 40 Hz. To manipulate perceptual synchrony, we imposed additional slow modulations on the auditory and visual stimuli either at the same (0.2 Hz = synchronous) or different frequencies (0.2 Hz vs. 0.7 Hz = asynchronous). This also enabled us to investigate the integration of two dynamic features within one sensory modality (e.g. a pure tone frequency modulated at 40Hz \& amplitude modulated at 0.2Hz) in the dynamic conditions. We reliably identified crossmodulation frequencies when these two stimulus features were modulated at different frequencies. In contrast, no crossmodulation frequencies were identified when information needed to be combined from auditory and visual modalities. The absence of audiovisual crossmodulation frequencies suggests that the previously reported audiovisual interactions in primary sensory areas may mediate low level spatiotemporal coincidence detection that is prominent for stimulus transients but less relevant for sustained SSRs. In conclusion, our results indicate that information in SSRs is integrated over multiple time scales within but not across sensory modalities at the primary cortical level.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, event_place = {Heiligkreuztal, Germany}, event_name = {12th Conference of Junior Neuroscientists of T{\"u}bingen (NeNA 2011)}, author = {Giani, AS and Ortiz, E and Belardinelli, P and Kleiner, M and Preissl, H and Noppeney, U} } @Poster { RoheN2012, title = {The ventriloquist effect depends on audiovisual spatial discrepancy and visual reliability}, year = {2011}, month = {10}, volume = {12}, pages = {39}, abstract = {Humans integrate auditory and visual spatial cues to locate objects. Generally, location judgments are dominated by vision because observers localize an auditory cue close to a visual cue even if they have been instructed to ignore the latter (ventriloquist effect). A recent model of multisensory integration proposes that the ventriloquist effect is governed by two principles: First, spatially discrepant cues are only integrated if the observer infers that both cues stem from one object (principle of causal inference). 
Second, if the inference results in an assumption that both cues originate from one object the cues are integrated by weighting them according to their relative reliability (principle of Bayes-optimal cue weighting). Thus, the bimodal estimate of the object`s location has a higher reliability than each of the unisensory estimates per se. In order to test this model, 26 subjects were presented with spatial auditory (HRTF-convolved white noise) and visual cues (cloud of dots). The 5x5x5 factorial design manipulated (1) the auditory cue location, (2) the visual cue location and (3) the reliability of the visual cue via the width of the cloud of dots. Subjects were instructed to locate the auditory cue while ignoring the visual cue and to judge the spatial unity of both cues. In line with the principle of causal inference results showed that the ventriloquist effect was weaker and unity judgments were reduced for larger audiovisual discrepancies. In case of small spatial discrepancies the ventriloquist effect was weaker at low levels of visual reliability implying a Bayes-optimal strategy of cue weighting only if a common cause of both cues was assumed. A probabilistic model incorporating the principles of causal inference and Bayes-optimal cue weighting accurately fitted the behavioral data. Overall, the pattern of results suggested that both principles describe important processes governing multisensory integration.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, event_place = {Heiligkreuztal, Germany}, event_name = {12th Conference of Junior Neuroscientists of T{\"u}bingen (NeNA 2011)}, author = {Rohe, T and Noppeney, U} } @Poster { BelardinelliOBNP2011, title = {Mapping human brain function: a comparison between Variational Bayes Techniques and LCMV Beamformer}, year = {2011}, month = {6}, number = {665}, abstract = {In the last years several hierarchical Bayesian approaches to the MEG/EEG inverse problem have provided for a relevant contribution to the field of MEG/EEG source localization (Friston et al., 2008b; Wipf et al., 2010). While several methods show applicability under specific conditions, none is optimal without prior information. Meaningful results are bound to previously acquired information. In this work we used simulated MEG data to compare three Variational Bayes reconstruction algorithms implemented within the SPM software preprocessing framework (available from http://www.fil.ion.ucl.ac.uk/spm/): two approaches involving the search for optimal mixtures of anatomically defined priors (Greedy Search (GS) and Automatic Relevance Determination (ARD)) (Friston et al., 2008a) and a third approach using a single empirical prior based on the well established LCMV Beamformer technique (Van Veen et al., 1997), that we denominated Empirical Bayes Beamformer (EBB). Our parameters of interest were: 1. Number of simulated dipoles (1 to 3), 2. Relative position between dipoles (bilaterally symmetric versus random locations) 3. Dipole time-course correlation level (high/low). 
Each parameter configuration set was tested with 5 levels of SNR (from -30 to +10 dB) and 50 dipole position sets.}, url = {http://www.kyb.tuebingen.mpg.defileadmin/user_upload/files/publications/2011/HBM-2011-Noppeney.pdf}, department = {Research Group Noppeney}, web_url = {http://www.humanbrainmapping.org/i4a/pages/index.cfm?pageID=3419}, event_place = {Qu{\'e}bec City, Canada}, event_name = {17th Annual Meeting of the Organization for Human Brain Mapping (HBM 2011)}, author = {Belardinelli, P and Ortiz, E and Barnes, G and Noppeney, U and Preissl, H} } @Poster { GianiOBKPN2011, title = {Using steady state responses in MEG to study information integration within and across the senses}, year = {2011}, month = {6}, number = {1028}, abstract = {How does the brain integrate information within and across sensory modalities to form a unified percept? This question has previously been addressed using transient stimuli, analyzed in the time domain. Alternatively, sensory interactions can be investigated using frequency analyses of steady state responses (SSRs). SSRs are elicited by periodic sensory stimulation (such as frequency modulated tones). In the frequency domain, 'true' signal integration is reflected by non-linear crossmodulation terms (i.e. the sums and differences of the individual SSR frequencies). In addition, two signals may modulate the amplitude of the fundamental and harmonic frequencies of one another. Using visual (V) and auditory (A) SSRs, we investigated whether A and V signals are truly integrated as indexed by crossmodulation terms or simply modulate the expression of each other's dominant frequencies. To manipulate perceptual synchrony, we imposed additional slow modulations on the auditory and visual SSRs either at same or different frequencies. This also enabled us to investigate the integration of two dynamic features within one sensory modality.}, url = {http://www.kyb.tuebingen.mpg.defileadmin/user_upload/files/publications/2011/HBM-2011-Giani.pdf}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://www.humanbrainmapping.org/i4a/pages/index.cfm?pageID=3419}, event_place = {Qu{\'e}bec City, Canada}, event_name = {17th Annual Meeting of the Organization for Human Brain Mapping (HBM 2011)}, author = {Giani, AS and Ortiz, EB and Belardinelli, P and Kleiner, M and Preissl, H and Noppeney, U} } @Poster { 6570, title = {Audio-visual interactions in binocular rivalry using the Shepard illusion in the auditory and visual domain}, year = {2010}, month = {6}, volume = {11}, number = {229}, abstract = {When both eyes are presented with dissimilar images, human observers report alternating percepts - a phenomenon known as binocular rivalry. Subjects were presented dichoptically with (1) a looming/receding starfield or (2) a looming/receding Shepard Zoom (Berger, Siggraph 2003), the visual equivalent of the Shepard tone illusion. In four psychophysical experiments, we investigated the influence of (1) a real complex tone rising/falling in pitch and (2) rising/falling Shepard tones on the dominance and suppression times of the rivaling visual motion percepts (relative to non-motion sounds or no sounds). First, we observed longer dominance times of looming than receding visual percepts even in the absence of sound. Second, auditory looming signals enhanced this looming bias by lengthening the dominance periods of their congruent visual looming percept. 
Third, receding auditory motion signals reduced the perceptual looming bias, though this effect was less pronounced and not consistently observed. Collectively, the results show that the perceptual predominance of looming relative to receding visual motion is amplified by congruent looming/receding auditory signals during binocular rivalry. Auditory looming/receding signals may influence the dominance times of their congruent and incongruent visual percepts via genuine multisensory and higher order attentional mechanisms at multiple levels of the cortical hierarchy.}, department = {Department B{\"u}lthoff}, web_url = {http://imrf.mcmaster.ca/IMRF/ocs2/index.php/imrf/2010/paper/view/229}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {Liverpool, UK}, event_name = {11th International Multisensory Research Forum (IMRF 2010)}, language = {en}, author = {Conrad, V and Kleiner, M and Hartcher-O‘Brien, J and Bartels, A and B{\"u}lthoff, HH and Noppeney, U} } @Poster { 7077, title = {Audiovisual Integration in Motion Discrimination: A multivariate decoding study}, year = {2010}, month = {6}, volume = {16}, number = {1587 WTh-AM}, pages = {160}, department = {Research Group Noppeney}, web_url = {http://www.humanbrainmapping.org/i4a/pages/index.cfm?pageid=1}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {Barcelona, Spain}, event_name = {16th Annual Meeting of the Organisation for Human Brain Mapping (HBM 2010)}, language = {en}, author = {Tuennerhoff, J and Noppeney, U} } @Poster { 6615, title = {Audiovisual synchrony perception of speech and non-speech signals in musicians and non-musicians}, year = {2010}, month = {6}, volume = {16}, number = {1580 WTh-PM}, pages = {212}, department = {Research Group Noppeney}, web_url = {http://www.humanbrainmapping.org/i4a/pages/index.cfm?pageid=1}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {Barcelona, Spain}, event_name = {16th Annual Meeting of the Organisation for Human Brain Mapping (HBM 2010)}, language = {en}, author = {Lee, HL and Noppeney, U} } @Poster { 6662, title = {Effective Connectivity in Multisensory integration: Insights from functional imaging in humans}, year = {2010}, month = {6}, volume = {11}, number = {126}, abstract = {Multisensory interactions emerge in a distributed neural system encompassing primary sensory and higher-order association areas. Multiple functional brain architectures have been proposed to mediate multisensory interactions in low-level auditory regions including feedforward thalamocortical, direct connections between sensory areas and feedback from higher-order association areas such as IPS or STS. We will review the potential and limitations of combining functional imaging and effective connectivity analyses for characterizing functional architectures of multisensory integration. In a series of three audiovisual integration studies, we combined dynamic causal modeling and Bayesian Model comparison to arbitrate between neural models where crossmodal effects are mediated via ‘direct‘ V1-A1 connectivity, ‘indirect‘ feedback connectivity from STS or both mechanisms. The first study manipulated the presence/absence of auditory and visual inputs and demonstrated that low level audiovisual salience effects are mediated via both direct and indirect mechanisms of audiovisual integration. 
The second study showed that audiovisual synchrony effects in low-level sensory areas are mediated primarily via direct connectivity. The third study demonstrated that semantic audiovisual (in)congruency effects in higher order visual object areas are elicited by direct influences from auditory areas rather than top-down effects from prefrontal cortices. We conclude by critically reviewing interpretational ambiguities and pitfalls of Dynamic Causal Modelling results based on fMRI data in humans.}, department = {Research Group Noppeney}, web_url = {http://imrf.mcmaster.ca/IMRF/ocs2/index.php/imrf/2010/paper/view/126}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {Liverpool, UK}, event_name = {11th International Multisensory Research Forum (IMRF 2010)}, language = {en}, author = {Noppeney, U and Werner, S and Ostwald, D and Lewis, R} } @Poster { 7076, title = {Investigating the effect of IPS TMS-stimulation on auditory and visual processing: A TMS-fMRI Study}, year = {2010}, month = {6}, volume = {16}, number = {10 MT-PM}, pages = {109}, department = {Department MRZ}, department2 = {Research Group Noppeney}, web_url = {http://www.humanbrainmapping.org/i4a/pages/index.cfm?pageid=1}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {Barcelona, Spain}, event_name = {16th Annual Meeting of the Organisation for Human Brain Mapping (HBM 2010)}, language = {en}, author = {Leit{\~a}o, J and Thielscher, A and Werner, S and Pohmann, R and Noppeney, U} } @Poster { 5950, title = {Audiovisual synchrony detection for speech and music signals}, year = {2009}, month = {7}, volume = {10}, number = {689}, pages = {177-178}, abstract = {Introduction: Audiovisual integration crucially depends on the relative timing of the auditory and visual signals. Although multisensory signals do not have to be precisely physically synchronous in order to be perceived as single temporal events, they have to co-occur within a certain temporal window of integration. To investigate how the human brain is fine tuned to the natural temporal statistics of audiovisual signals, we characterized the temporal integration window for natural speech, sinewave replicas of natural speech (SWS) and music in a simultaneity judgment task. Methods: The experimental paradigm manipulated: 1) stimulus class: speech vs. SWS vs. music, and 2) stimulus length: short (i.e. natural syllables, SWS syllables and tones) vs. long (i.e. natural sentences, SWS sentences and melodies). Audiovisual asynchronies ranged from -360ms (auditory leading) to 360 ms (visual leading) in 60ms increments. Eight participants performed the experiment on 2 separate days. The order of conditions was counterbalanced within and between subjects. The proportion of synchronous responses was computed for each participant. To refrain from making any distributional assumptions, the psychometric curves of each participant were characterized by four indices: (i) peak performance, (ii) peak location, (iii) width and (iv) asymmetry [1]. The four indices were analyzed using repeated measures of ANOVAs with stimulus class and stimulus length as within-subjects factors. Results: The ANOVA for peak performance did not show any significant main effects of stimulus class and length [F(2,14)<1, n.s.; F(1,7)=1.6, p=.24]. The ANOVA for peak location revealed a significant interaction between stimulus class and length [F(2,14)=3.8, p<.05]. 
Post-hoc paired t-tests revealed that the peak locations were significantly shifted towards auditory leading for melodies compared to tones [t(7)=2.4, p<.05], and for melodies compared to SWS sentences [t(7)=-2.3, p=.053]. The ANOVA for width revealed significant main effects of stimulus class and length [F(2,14)=9.3, p<.005; F(1,7)=11.0, p<.05] in the absence of an interaction [F(2,14)<1, n.s.]. Post-hoc paired t-tests revealed that the widths were wider for SWS speech than natural speech [t(7)=7.0, p<.005] and music [t(7)=2.4, p=.05]. Furthermore, the widths were narrower for long stimuli (i.e. sentences and melodies) than short stimuli (i.e. syllables and tones) [t(7)=-3.3, p<.05]. With respect to the asymmetry, there was a significant main effect of stimulus length [F(1,7)=7.1, p<.05] but not stimulus class [F(2,14)=1.1, p=.35], thus indicating that the psychometric curves were more asymmetric for long stimuli (i.e. sentences and melodies) than short stimuli (i.e. syllables and tones). Conclusion: Our results demonstrated that the psychometric curves were narrower and more asymmetric for long stimuli (i.e. sentences and melodies) than short stimuli (i.e. syllables and tones). Thus, participants may rely on information during the entire sentence for synchrony judgments. In addition, our results demonstrated that the psychometric curves were wider but less asymmetric for SWS speech relative to natural speech and music. Collectively, our results support the hypothesis that audiovisual speech perception is fine-tuned to the natural mapping between facial movement and spectrotemporal structure of natural speech.}, department = {Research Group Noppeney}, web_url = {http://imrf.mcmaster.ca/IMRF/ocs/index.php/meetings/2009/paper/view/689}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {New York, NY, USA}, event_name = {10th International Multisensory Research Forum (IMRF 2009)}, language = {en}, author = {Lee, H and Noppeney, U} } @Poster { 5949, title = {Auditory influences on the temporal dynamics of binocular rivalry}, year = {2009}, month = {7}, volume = {10}, number = {590}, pages = {102-103}, abstract = {Introduction When the two eyes are presented with dissimilar images, human observers report alternating percepts - a phenomenon coined binocular rivalry. These perceptual fluctuations reflect competition between the two visual inputs both at lower, monocular and at binocular, higher-level processing stages. Even though perceptual transitions occur stochastically over time, their temporal dynamics can be modulated by changes in stimulus strength, context and attention. While increases in stimulus strength (such as contrast) primarily abbreviate suppression phases of a percept, attentional and contextual factors predominantly lengthen its dominance periods. Goals This project investigates the influence of concurrent auditory stimulation on the temporal dynamics of binocular rivalry. In two psychophysics studies, we investigated whether sounds that provide directionally congruent, incongruent or no motion information modulate the dominance periods of rivaling visual motion percepts. Methods In the first psychophysics study, observers dichoptically viewed random-dot kinematograms (RDK) at 0\% motion coherence in one eye and 50\% in the other in a stereoscope, while being concurrently presented with directionally congruent auditory motion, noise and no sound. 
In the second psychophysics study, they viewed two RDKs of opposite motion directions at 100\% coherence, with the auditory motion stimulus being directionally congruent with one of the two rivaling motion percepts. In both experiments, congruent auditory motion was temporally synchronized with visual motion to facilitate audio-visual integration into a coherent percept. Initial results Both experiments consistently revealed a statistically significant influence of sound on perceptual dominance times. In the first experiment, directionally congruent auditory motion but not noise increased the duration of the dominance phases of the RDK at 50\% motion coherence. In the second experiment, auditory motion lengthened the dominance periods of the directionally congruent 100\% RDK and abbreviated those of the directionally incongruent 100\% RDK. Initial conclusions The results demonstrate that auditory stimuli influence the temporal dynamics of binocular rivalry. Auditory motion lengthened the dominance periods of a visual motion percept when it was directionally congruent, but shortened them when it was directionally incongruent. Thus, the (in)congruency of auditory motion primarily influences the duration of the dominance periods similar to purely visual contextual effects, even though a small effect was also observed on the suppression periods. In conclusion, the human brain draws on information from multiple senses to arbitrate between multiple rivaling perceptual interpretations.}, department = {Research Group Noppeney}, web_url = {http://imrf.mcmaster.ca/IMRF/ocs/index.php/meetings/2009/paper/view/590}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {New York, NY, USA}, event_name = {10th International Multisensory Research Forum (IMRF 2009)}, language = {en}, author = {Conrad, V and Bartels, A and Kleiner, M and Noppeney, U} } @Poster { 5934, title = {The contributions of transient and sustained responses to audio visual integration of dynamic information}, year = {2009}, month = {7}, volume = {10}, number = {754}, pages = {251-252}, abstract = {Transient and sustained responses have been shown to play distinct functional roles in auditory processing: Transient responses may subserve rapid stimulus detection, sustained responses contribute to a more detailed sound characterization. While numerous fMRI studies have reported audiovisual interactions at multiple levels of the cortical hierarchy, they were not able to dissociate transient and sustained responses. This fMRI study optimized the design to disentangle the contributions of sustained, onset and offset responses to superadditive and subadditive interactions and localize the effects within the visual and auditory processing hierarchies. Seventeen subjects participated in this fMRI study (Siemens TimTrio 3T scanner, GE-EPI, TE = 40 ms, 38 axial slices, TR = 3.08 s). While engaged in a target detection task, they were presented with 1 s, 10 s, 20 s, 30 s blocks of (i) video clips of an expanding radial star-field, (ii) auditory pink noise or (iii) both. The velocity of the star-field and the sound amplitude were jointly modulated according to 0.1 Hz sine-wave function. The regressors of the general linear model were formed by convolving (i) delta functions encoding the onset and offset of each block and (ii) box car functions adjusted for block length with the hemodynamic response functions. Blocks of 1 s duration were modeled only as onsets. 
In addition, the model included targets and parametric modulators encoding the amplitude / velocity modulation. To allow for a random-effects analysis (SPM5), contrast images for each subject were entered into second level one sample t-tests. We tested for superadditive and subadditive interactions separately for onset, offset and sustained block responses. Results are reported at p<0.05 whole brain corrected. Significant audiovisual interactions were observed only for the transients: For the onsets, the interactions were superadditive in the fusiform gyrus (FFG), anterior calcarine sulcus (aCaS) and the cuneus (Cun) and subadditive in the posterior superior temporal gyrus/sulcus (pSTS/STG) and the precuneus (PrCun). For the offsets, the interactions were subadditive in the pSTS/STG region and the anterior intraparietal sulcus (aIPS). The regional response profiles were further characterized by their general responsiveness to visual, auditory and audiovisual onsets, offsets and sustained stimulation. This dissociated three activation profiles: (i) In FFG, only the onsets elicited a strong positive response with moderate responses to offsets and sustained stimulation. Further, the onset responses were positive for visual and audiovisual stimuli and negative for auditory stimuli. (ii) In aCaS, only the offsets elicited a positive response for all sensory modalities. (iii) In the remaining regions, both onsets and offsets elicited a positive response for all sensory modalities. In conclusion, audiovisual interactions are observed primarily for transient rather than sustained stimulation. Furthermore, these AV interactions are located in regions that respond primarily to transients. In contrast, no significant interactions were observed in regions that exhibited sustained responses to extended blocks of audiovisual stimulation.}, url = {http://www.kyb.tuebingen.mpg.de/fileadmin/user_upload/files/publications/PosterIMRF2009_FINAL_[0].pdf}, department = {Research Group Noppeney}, web_url = {http://imrf.mcmaster.ca/IMRF/ocs/index.php/meetings/2009/paper/view/754}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {New York, NY, USA}, event_name = {10th International Multisensory Research Forum (IMRF 2009)}, language = {en}, author = {Werner, S and Noppeney, U} } @Poster { RotshteinDBRNH2009, title = {The neural correlates of picture naming: a VBM study of CT images}, journal = {NeuroImage}, year = {2009}, month = {7}, volume = {47}, number = {Supplement 1}, pages = {S174}, abstract = {Introduction Neuropsychological researchers have long been interested in relating the location of a patient's lesion to her cognitive impairment (e.g. Broca, 1861). Previous studies have primarily relied on small patient numbers, manual procedures for symptom-lesion mapping and binary data (Davidoff and De, 1994; DeLeon et al., 2007; Luzzatti et al., 2006; Tranel et al., 1997). Furthermore, the above studies did not control for potential confounds due to covarying factors, such as levels of attention; all of these factors may have contributed to the inconsistencies in the current literature. Here, we combined robust, automated lesion analysis procedures and standard CT imaging to investigate the neural networks involved in object naming in a clinically-relevant setting. 
Extensive prior research suggests that structures within the left ventral visual stream, anterior temporal and frontal cortex play a key role in mediating object naming (Humphreys et al., 1999; Martin, 2007; Price et al., 1996). Methods We tested 80 acute stroke patients using the object naming task from the Birmingham University Cognitive Screening (BUCS; Humphrey et al., 2007). All patients underwent a CT scan as part of their clinical evaluation. Data were analysed using SPM5 \& SPM8b. The CT images were normalized to a CT template and the skull was removed. Subsequently the images were segmented and normalized again; finally the segmented grey matter (GM) images were smoothed using a 12mm Gaussian kernel (Fig. 1). The GM images were entered into a regression analysis that modeled subjects’ performance on the picture naming task; age, gender, mini-mental state and depression were included as covariates of no interest. (1) Using mass-univariate voxel-based morphometry (VBM), we tested at every single voxel for reduction in GM volume that was positively predicted by subjects’ picture naming performance. (2) Combining Multivariate Bayesian Decoding (MVB; Friston et al., 2008; with smooth priors) and Bayesian model comparison, we compared the ability of GM voxels within (i) the entire brain, (ii) the left occipito-temporal, and (iii) left frontal cortices to predict subjects’ naming performance. Results On average patients named 9.1/14 ± 3.64 (SD) pictures correctly. When compared to age-matched healthy controls, 42 patients were classified as impaired (<10, Fig. 2). VBM revealed that impaired picture naming predicted reduced GM in the left ventral visual stream, the anterior temporal pole and (bi-laterally) in the middle frontal gyrus (P <0.001, Fig 3). MVB and Bayesian model comparison showed that the left frontal gyrus did not predict behaviour better than the null model (log evidence (LG) <3). In contrast, the left occipito-temporal cortices were significantly better predictors than the null model (LG > 5, Fig 4). Conclusions In line with previous research we showed that lesions to the left ventral visual stream and frontal cortices impaired the ability to name objects. Using MVB we found that lesions within the left occipito-temporal cortices were better predictors of behaviour than lesions to the left frontal cortex. We conclude that CT data from a large number of patients can provide useful information for function-lesion mapping, here used to reveal the neural circuits involved in picture naming.}, department = {Research Group Noppeney}, web_url = {http://www.sciencedirect.com/science/article/pii/S1053811909718890}, event_place = {San Francisco, CA, USA}, event_name = {15th Annual Meeting of the Organisation for Human Brain Mapping (HBM 2009)}, DOI = {10.1016/S1053-8119(09)71889-0}, author = {Rotshtein, P and Douis, H and Bickerton, W and Riddoch, JM and Noppeney, U and Humphreys, GW} } @Poster { 6297, title = {Visual object categorization with conflicting auditory information}, journal = {NeuroImage}, year = {2009}, month = {7}, volume = {47}, number = {Supplement 1}, pages = {S43}, abstract = {Introduction Spatio-temporally coincident sensory signals usually emanate from the same event or object. Ideally, the human brain should integrate sensory information derived from a common source, while avoiding mergence of information from different sources. Indeed, it has long been recognized that multisensory integration breaks down when sensory estimates are brought into large conflict.
Nevertheless, conflicting task-irrelevant sensory information has been shown to interfere with decisions on task-relevant sensory input in a selective crossmodal attention paradigm. Combining psychophysics and fMRI, this study investigates how the human brain forms decisions about multisensory object categories when the senses disagree. Methods 18 subjects participated in this fMRI study (Siemens Trio 3T scanner, GE-EPI, TE=40 ms, 38 axial slices, TR=3.08 s). The 2\(\times\)2 factorial design manipulated (i) Visual category: Animal vs. Landmark, and (ii) Auditory category: Animal vocalization vs. Sound associated with Landmark. In a visual selective attention paradigm, subjects categorized degraded pictures while ignoring their accompanying intact sounds that could be either semantically congruent or incongruent. These particular stimulus categories were used as they have been associated with selective activations in the fusiform face (FFA) vs. parahippocampal place (PPA) area. To localize FFA and PPA, subjects were also presented pictures of faces and landmarks in target detection task. The activation trials were interleaved with 6 s fixation blocks. To allow for a random-effects analysis (SPM5), contrast images for each subject were entered into a second level ANOVA. In additional regression analyses, subjects’ category-selective responses (localizer) were regressed on their reaction time (RT) increases for incongruent relative to congruent trials (separately for landmarks and animals). We tested for (1) Incongruent vs. Congruent, (2) Landmark vs Animal (3) Incongruency \(\times\) Category interaction and (4) Landmark vs Animal predicted by RT incongruency effects. Within the object processing system, results are reported at p<0.05 whole brain cluster level corrected with p<0.001 voxel threshold. Results Behaviour: 2-way ANOVA of RT revealed main effect of incongruency, in the absence of a main effect of category or an interaction. No significant effects were revealed for performance accuracy. fMRI: 1) Incongruency effects: left inferior precentral sulcus, bilateral insula (only at uncorrected threshold) 2) Landmark-selective: bilateral PPA; Animal-selective: bilateral FFA 3) Incongruent > Congruent for Landmark > Animal: bilateral auditory cortex (due to stimulus confounds) 4) Landmark > Animal predicted by RT incongruency effects for landmarks: left fusiform Conclusions At the behavioural level, conflicting task-irrelevant auditory information interfered with subjects' categorization as indicated by longer reaction times for incongruent relative to congruent stimuli. At the neural level, this behavioural interference was mediated by increased activation in bilateral insula and left inferior precentral sulcus. No incongruency effects were observed commonly for all subjects in category-selective areas. However, in the left fusiform (FFA), activation for Landmark > Animal was increased for subjects that showed strong behavioural interference during categorization of visual landmarks accompanied by animal sounds. 
Thus, less modular and more distributed object representations in the occipitotemporal cortex may render subjects more susceptible to interfering object information from the auditory modality.}, department = {Research Group Noppeney}, web_url = {http://www.sciencedirect.com/science/article/pii/S1053811909700229}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {San Francisco, CA, USA}, event_name = {15th Annual Meeting of the Organisation for Human Brain Mapping (HBM 2009)}, language = {en}, DOI = {10.1016/S1053-8119(09)70022-9}, author = {Adam, R and Noppeney, U} } @Poster { 5948, title = {Visual-Auditory Synchrony Boosts BOLD Response in Posterior Temporal and Occipital Cortices}, year = {2009}, month = {7}, volume = {10}, number = {571}, pages = {90-91}, abstract = {Introduction: Synchrony is a powerful cue for driving multisensory integration of dynamic stimuli and multisensory integration increases perceptual reliability. Thus ambiguous, dynamic, visual targets should be more accurately perceived when accompanied by concurrent auditory stimuli. For example, when a creature moving through the undergrowth is obscured by both intervening foliage \& conditions of poor illumination, it may be more readily identified when the sound of each footfall is audible - heralding motion of figure against background and foreground. This fMRI study was designed to identify the neural correlates of synchrony-induced multisensory integration during shape and motion discrimination. Methods: 16 subjects participated in this fMRI study (Siemens Allegra 3 T scanner, GE-EPI, TE=30, 38 axial slices, TR=3.08s). The 2x2 factorial design manipulated audiovisual synchrony (synchronous vs. asynchronous) and task (motion vs. shape discrimination). In a visual selective attention paradigm, subjects discriminated the shape or rotational motion of a dot array obscured by a rapidly-moving snow field. Each 2.5s trial consisted of 5 visual events that occurred at unpredictable intervals to eliminate anticipatory effects associated with regular stimulus timings. In 50\% of the trials, each visual event was accompanied by a simultaneous 50ms "click" sound (synchronous condition), whilst in the other 50\% the auditory and visual events were offset by at least 120ms (asynchronous condition). Visual events were discrete rotations of one of 32 different symmetrical, concentrically distributed, dot arrays. Synchronous and asynchronous trials were presented in randomised order and task order was counterbalanced within and across subjects. To allow for a random-effects analysis (SPM5), contrast images for each subject were entered into second level one-sample t-tests. We tested for the main effects of synchrony and task and their interactions. Effects are reported at p<0.05 whole brain corrected. Behavioural Results: Subject performance was significantly more accurate under conditions of visual-auditory synchrony than asynchrony, whilst there was no significant difference in performance between the shape and motion tasks. The interactions indicate a significant performance improvement during synchronous trials for the motion task, but not the shape task. fMRI Results: In bilateral posterior occipital cortices audiovisual synchrony amplified the BOLD response, irrespective of task.
In contrast, in lateral occipital cortex (LOC) and posterior superior temporal (pSTG) / supramarginal (SMG) gyri, synchrony-induced activation increases were task dependent: in LOC synchrony effects were selective for shape discrimination, whilst in pSTG/SMG they were selective for motion discrimination. Discussion/conclusion: Our results suggest that, under noisy visual conditions, visual perception can be enhanced by concurrent acoustic stimulation in the following way: Audiovisual coincidence may enhance saliency of visual stimuli by amplification of visual responses in occipital cortex and thus lead to improved figure-ground segmentation. These are then further amplified in higher order LOC and pSTG/SMG in a task-selective fashion.}, department = {Research Group Noppeney}, web_url = {http://imrf.mcmaster.ca/IMRF/ocs/index.php/meetings/2009/paper/view/571}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {New York, NY, USA}, event_name = {10th International Multisensory Research Forum (IMRF 2009)}, language = {en}, author = {Lewis, RK and Noppeney, U} } @Poster { 5263, title = {Audio-visual object integration in human STS: Determinants of stimulus efficacy and inverse effectiveness}, year = {2008}, month = {7}, volume = {9}, pages = {275}, abstract = {Combining fMRI and psychophysics, we investigated the neural mechanisms underlying the integration of higher-order audio-visual object features. In a target detection and a semantic categorization task, we presented subjects with pictures and sounds of tools or musical instruments while factorially manipulating the relative informativeness (degradation) of auditory and visual stimuli. Controlling for integration effects of low-level stimulus features, our experiment reveals integration of higher-order audio-visual object information selectively in anterior and posterior STS regions. Across subjects, audio-visual BOLD-interactions within these regions were strongly subadditive for intact stimuli and turned into additive effects for degraded stimuli. Across voxels, the probability to observe subadditivity increased with the strength of the unimodal BOLD-responses for both degraded and intact stimuli. Importantly, subjects’ multi-sensory behavioural benefit significantly predicted the mode of integration in STS : Subjects with greater benefits exhibited stronger superadditivity. In conclusion and according to the inverse effectiveness principle that is determined by stimulus efficacy, we demonstrate that the mode of multi-sensory integration in STS depends on stimulus informativeness, the voxel-specific responsiveness to unimodal stimulus components and the subject-specific multi-sensory behavioural benefit in object perception. 
The relationship between BOLD-responses and behavioural indices shows the functional relevance of super- and subadditive modes of multi-sensory integration.}, url = {http://www.kyb.tuebingen.mpg.de/fileadmin/user_upload/files/publications/IMRF2008_Swerner_[0].pdf}, department = {Research Group Noppeney}, web_url = {http://imrf.mcmaster.ca/IMRF/2008/pdf/FullProgramIMRF08.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {Hamburg, Germany}, event_name = {9th International Multisensory Research Forum (IMRF 2008)}, language = {en}, author = {Werner, S and Noppeney, U} } @Poster { 5479, title = {Natural, metaphoric and linguistic auditory-visual interactions}, year = {2008}, month = {7}, volume = {9}, number = {206}, pages = {132}, abstract = {To form a coherent percept of our dynamic environment, the brain merges motion information from the auditory and visual senses. Yet, not only auditory motion, but also ‘metaphoric’ pitch has been shown to influence visual motion discrimination. Here, we systematically investigate the neural systems that mediate auditory influences on visual motion discrimination in natural, metaphoric and linguistic contexts. In a visual selective attention paradigm, subjects discriminated the direction of visual motion at several levels of ambiguity, while ignoring a simultaneous auditory stimulus that was 1) ‘natural’ MOTION: left vs. right moving white noise, 2) ‘metaphoric’ PITCH: rising vs. falling pitch or 3) ‘linguistic’ SPEECH: spoken German words denoting directions e.g. ‘links’ vs. ‘rechts’. Behaviourally, all three classes of auditory stimuli induced a comparable directional bias. At the neural level, the interaction between visual ambiguity and audition revealed an auditory influence on visual motion processing for MOTION in left hMT/V5 and for SPEECH in right intraparietal sulcus. Direct comparisons across contexts confirmed this functional dissociation: The interaction effect gradually decreased in left hMT+/V5 for MOTION>PITCH>SPEECH and in right IPS for SPEECH>PITCH>MOTION. In conclusion, while natural audio-visual integration of motion signals emerges in motion processing areas, linguistic interactions are revealed primarily in higher-level fronto-parietal regions.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://imrf.mcmaster.ca/IMRF/2008/pdf/FullProgramIMRF08.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {Hamburg, Germany}, event_name = {9th International Multisensory Research Forum (IMRF 2008)}, language = {en}, author = {Sadaghiani, S and Maier, J and Noppeney, U} } @Poster { 5267, title = {Physical and perceptual factors that determine the mode of audio-visual integration in distinct areas of the speech processing system}, year = {2008}, month = {7}, volume = {9}, number = {208}, pages = {133}, abstract = {Speech and non-speech stimuli differ in their (i) physical (spectro-temporal structure) and (ii) perceptual (phonetic/linguistic representation) aspects. To dissociate these two levels in audio-visual integration, this fMRI study employed original spoken sentences and their sinewave analogues that were either trained and perceived as speech (group 1) or non-speech (group 2).
In both groups, all stimuli were presented in visual, auditory or audiovisual modalities. AV-integration areas were identified by superadditive and subadditive interactions in a random effects analysis. While no superadditive interactions were observed, subadditive effects were found in right superior temporal sulci for both speech and sinewave stimuli. The left ventral premotor cortex showed increased subadditive interactions for speech relative to their sinewave analogues irrespective of whether they were perceived as speech or non-speech. More specifically, only familiar auditory speech signal suppressed premotor activation that was elicited by passive lipreading in the visual conditions, suggesting that acoustic rather than perceptual/linguistic features determine AV-integration in the mirror neuron system. In contrast, AV-integration modes differed between sinewave analogues perceived as speech and non-speech in bilateral anterior STS areas that have previously been implicated in speech comprehension. In conclusion, physical and perceptual factors determine the mode of AV-integration in distinct speech processing areas.}, department = {Research Group Noppeney}, web_url = {http://imrf.mcmaster.ca/IMRF/2008/pdf/FullProgramIMRF08.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {Hamburg, Germany}, event_name = {9th International Multisensory Research Forum (IMRF 2008)}, language = {en}, author = {Lee, HL and Tuennerhoff, J and Werner, S and Pammi, C and Noppeney, U} } @Poster { 5265, title = {The prefrontal cortex accumulates object evidence through differential connectivity to the visual and auditory cortices}, journal = {NeuroImage}, year = {2008}, month = {6}, volume = {41}, number = {Supplement 1}, pages = {S150}, department = {Research Group Noppeney}, web_url = {http://www.sciencedirect.com/science/article/pii/S1053811908003133}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {Melbourne, Australia}, event_name = {14th Annual Meeting of the Organization for Human Brain Mapping (HBM 2008)}, language = {en}, DOI = {10.1016/j.neuroimage.2008.04.008}, author = {Noppeney, U and Ostwald, D and Kleiner, M and Werner, S} } @Poster { 4566, title = {Accumulation of object evidence from multiple senses}, journal = {NeuroImage}, year = {2007}, month = {6}, volume = {36}, number = {Supplement 1}, pages = {S109}, url = {http://www.kyb.tuebingen.mpg.de/fileadmin/user_upload/files/publications/Poster_HBM2007_noppeney_[0].pdf}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://www.sciencedirect.com/science/article/pii/S1053811907002789}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {Chicago, IL, USA}, event_name = {13th Annual Meeting of the Organization for Human Brain Mapping (HBM 2007)}, language = {en}, DOI = {10.1016/j.neuroimage.2007.03.045}, author = {Noppeney, U and Ostwald, D and Kleiner, M and Werner, S} } @Poster { 4565, title = {Multi-sensory interactions in perceptual and response selection processes}, journal = {NeuroImage}, year = {2007}, month = {6}, volume = {36}, number = {Supplement 1}, pages = {S120}, url = {http://www.kyb.tuebingen.mpg.de/fileadmin/user_upload/files/publications/Poster_HBM2007_werner_[0].pdf}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://www.sciencedirect.com/science/article/pii/S1053811907002789}, institute = 
{Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {Chicago, IL, USA}, event_name = {13th Annual Meeting of the Organization for Human Brain Mapping (HBM 2007)}, language = {en}, DOI = {10.1016/j.neuroimage.2007.03.045}, author = {Werner, S and Noppeney, U} } @Poster { WrightMNVRGHP2007, title = {Selective activation around the left occipito-temporal sulcus for words relative to pictures: Individual variability or false positives?}, journal = {NeuroImage}, year = {2007}, month = {6}, volume = {36}, number = {Supplement 1}, pages = {S64}, department = {Research Group Noppeney}, web_url = {http://www.sciencedirect.com/science/article/pii/S1053811907002789}, event_place = {Chicago, IL, USA}, event_name = {13th Annual Meeting of the Organization for Human Brain Mapping (HBM 2007)}, DOI = {10.1016/j.neuroimage.2007.03.045}, author = {Wright, ND and Mechelli, A and Noppeney, U and Veltman, DJ and Rombouts, SARB and Glensman, J and Haynes, J-D and Price, CJ} } @Poster { 4609, title = {Neural systems involved in visual-tactile integration of shape information}, year = {2007}, month = {1}, department = {Department B{\"u}lthoff}, department2 = {Research Group Ernst}, department3 = {Research Group Noppeney}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {Klosters, Switzerland}, event_name = {42nd Winter Seminar: Biophysical Chemistry, Molecular Biology and Cybernetics of Cell Functions}, language = {en}, author = {Helbig, H and Noppeney, U and Ernst, M} } @Poster { SarkheilVBN2006, title = {Repetition priming in 3-D form and motion recognition}, journal = {Perception}, year = {2006}, month = {8}, volume = {35}, number = {ECVP Abstract Supplement}, pages = {35}, abstract = {Behavioural studies have highlighted the importance of dynamic information for object recognition: Object motion provides additional views and image features that may facilitate the extraction of 3-D shape. However, even the direction of in-depth rotation that controls for shape and view information affects recognition performance. Here, we used a priming paradigm to investigate the effects of motion direction and form as well as their interaction during dynamic object recognition. Furthermore, two task-contexts were used to investigate the effects of top-down modulation on behavioural priming effects. For these contexts, subjects responded on the basis of object form or motion. Subjects were presented with pairs of successive objects rotating in depth. They performed a two-alternative forced choice form or motion categorisation to the second object. The conditions conformed to a 2 \(\times\) 2 \(\times\) 2 factorial design manipulating (i) object form (same/different pairs), (ii) in-depth rotation (same/different pairs), and (iii) task (motion/form). We observed that form and motion priming effects interacted and were enhanced in congruent task context. These findings suggest that dynamic 3-D object recognition is accomplished through interaction of form and motion information. Furthermore, both form and motion priming are influenced by task requirements. Future fMRI studies will investigate these effects at the neuronal level.}, department = {Department B{\"u}lthoff}, department2 = {Department Logothetis}, department3 = {Research Group Noppeney}, web_url = {http://pec.sagepub.com/content/35/1_suppl.toc}, event_place = {St.
Petersburg}, event_name = {29th European Conference on Visual Perception}, DOI = {10.1177/03010066060350S101}, author = {Sarkheil, P and Vuong, QC and B{\"u}lthoff, HH and Noppeney, U} } @Poster { 4342, title = {Audio-visual integration during multisensory object categorization}, year = {2006}, month = {6}, day = {17}, number = {124}, abstract = {Tools or musical instruments are characterized by their form and sound. We investigated audio-visual integration during semantic categorization by presenting pictures and sounds of objects separately or together and manipulating the degree of information content. The 3 x 6 factorial design manipulated (1) auditory information (sound, noise, silence) and (2) visual information (6 levels of image degradation). The visual information was degraded by manipulating the amount of phase scrambling of the image (0\%, 20\%, 40\%, 60\%, 80\%, 100\%). Subjects categorized stimuli as musical instruments or tools. In terms of accuracy and reaction times (RT), we found significant main effects of (1) visual and (2) auditory information and (3) an interaction between the two factors. The interaction was primarily due to an increased facilitatory effect of sound for the 80\% degradation level. Consistently across the first 5 levels of visual degradation, we observed RT improvements for the sound-visual relative to the noise- or silence-visual conditions. Corresponding RT distributions significantly violated the so-called race model inequality across the first 5 percentiles of their cumulative density functions (even when controlling for low-level audio-visual interactions). These results suggest that redundant structural and semantic information is not independently processed but integrated during semantic categorization.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, web_url = {http://imrf.mcmaster.ca/2006/viewabstract.php\%3Fid=124\&symposium=0.html}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {Dublin, Ireland}, event_name = {7th International Multisensory Research Forum (IMRF 2006)}, language = {en}, author = {Werner, S and Noppeney, U} } @Conference { RoheN2016_2, title = {See what you hear: Constructing a representation of the world across the senses}, journal = {Journal of Cognitive Neuroscience}, year = {2016}, month = {4}, day = {4}, number = {Supplement}, pages = {32}, abstract = {Our brains are continuously confronted with the problem of how to understand the sensory signals with which they are bombarded. For example, I can hear a bird and I can see a bird, but is it one bird singing on the branch, or is it two birds: one sitting on the branch and the other singing in the bush? How should the brain combine signals into a veridical percept of the environment without knowing whether they pertain to the same or different events? Combining Bayesian Modelling with fMRI and EEG multivariate decoding, we investigated how the brain solves this so-called Causal Inference problem. We demonstrate that the human brain integrates sensory signals into spatial representations in line with Bayesian Causal Inference by simultaneously encoding multiple spatial estimates along the cortical hierarchy.
Critically, only at the top of the hierarchy, in anterior intraparietal sulcus, is the uncertainty about the world’s causal structure taken into account: sensory signals are integrated into spatial priority maps, weighted by their bottom-up sensory reliability and top-down task-relevance, as predicted by Bayesian Causal Inference. Characterizing the computational operations of multisensory interactions in human neocortex reveals the hierarchical nature of multisensory perception.}, talk_type = {Abstract Talk}, web_url = {https://www.cogneurosociety.org/documents/CNS_2016_Program.pdf}, event_place = {New York, NY, USA}, event_name = {23rd Annual Meeting of the Cognitive Neuroscience Society (CNS 2016)}, author = {Noppeney, U and Rohe, T} } @Conference { GianiN2012_2, title = {Towards multisensory awareness: Finding (neuronal) mechanisms that enable the detection and integration of audiovisual stimuli}, year = {2012}, month = {11}, volume = {13}, pages = {8}, abstract = {In daily life our sensory systems continuously receive complex information from different sensory modalities, such as vision, audition or touch. To form unified and coherent percepts, this information needs to be integrated across the various senses; a process called multisensory integration. Multisensory information stemming from natural environments, such as market places or busy roads, can be extremely diverse. Moreover, limited processing capacities allow only a small subset of the complex sensory information to enter awareness. Hence, two main questions arise: Which are the (neuronal) mechanisms that enable sensory awareness? And is perceptual awareness necessary for multisensory integration to occur? During my PhD I used magnetoencephalography (MEG) and psychophysical measurements, trying to find some answers to these questions, which I will be presenting during this talk.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, talk_type = {Abstract Talk}, web_url = {http://www.neuroschool-tuebingen-nena.de/}, event_place = {Schramberg, Germany}, event_name = {13th Conference of the Junior Neuroscientists of T{\"u}bingen (NeNA 2012): Science and Education as Social Transforming Agents}, author = {Giani, A and Noppeney, U} } @Conference { EndresANG2012, title = {Explicit coding in the brain: data-driven semantic analysis of human fMRI BOLD responses with Formal Concept Analysis}, journal = {Perception}, year = {2012}, month = {9}, volume = {41}, number = {ECVP Abstract Supplement}, pages = {67}, abstract = {We investigated whether semantic information about object categories can be obtained from human fMRI BOLD responses with Formal Concept Analysis (FCA), an order-theoretic approach for the analysis of semantic information, such as specialization hierarchies and parts-based codes. Unlike other analysis methods (e.g. hierarchical clustering), FCA does not impose inappropriate structure on the data. FCA is a mathematical formulation of the explicit coding hypothesis (Foldiak, 2009, Current Biology 19, R904-R906). A human subject was scanned viewing 72 gray-scale pictures of animate and inanimate objects in a target detection task. To apply FCA, we employ a hierarchical Bayesian classifier, which maps fMRI responses onto binary attributes, and these onto object labels. The connectivity matrix between attributes and labels is the formal context for FCA.
FCA revealed a clear dissociation between animate and inanimate objects in a high-level visual area (inferior temporal cortex, IT), with the inanimate category including plants. The inanimate category was subdivided into plants and non-plants when we increased the number of attributes extracted from the fMRI responses. FCA also displayed organizational differences between the IT and the primary visual cortex, V1. We show that familiarity and similarity ratings are strongly correlated with the attributes computed from the fMRI signal.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, talk_type = {Abstract Talk}, web_url = {http://pec.sagepub.com/content/41/1_suppl.toc}, event_place = {Alghero, Italy}, event_name = {35th European Conference on Visual Perception}, DOI = {10.1177/03010066120410S101}, author = {Endres, D and Adam, R and Noppeney, U and Giese, M} } @Conference { AdamN2012, title = {Processing of audiovisual phonological incongruency depends on awareness}, journal = {Seeing and Perceiving}, year = {2012}, month = {6}, day = {21}, volume = {25}, pages = {168}, abstract = {Capacity limitations of attentional resources allow only a fraction of sensory inputs to enter our awareness. Most prominently, in the attentional blink, the observer fails to detect the second of two rapidly successive targets that are presented in a sequence of distractor items. This study investigated whether phonological (in)congruency between visual target letters and spoken letters is modulated by subjects’ awareness. In a visual attentional blink paradigm, subjects were presented with two visual targets (buildings and capital Latin letters, respectively) in a sequence of rapidly presented distractor items. A beep was presented always with T1. We manipulated the presence/absence and phonological congruency of the spoken letter that was presented concurrently with T2. Subjects reported the identity of T1 and T2 and reported the visibility of T2. Behaviorally, subjects correctly identified T2 when it was reported to be either visible or unsure, while performances were below chance level when T2 was reported to be invisible. At the neural level, the anterior cingulate was activated for invisible>unsure>visible T2. In contrast, visible relative to invisible trials increased activation in bilateral cerebellum, pre/post-central gyri extending into parietal sulci and bilateral inferior occipital gyri. Incongruency effects were observed in the left inferior frontal gyrus, caudate nucleus and insula only for visible stimuli. In conclusion, phonological incongruency is processed differently when subjects are aware of the visual stimulus. 
This indicates that multisensory integration is not automatic but depends on subjects’ cognitive state.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, talk_type = {Abstract Talk}, web_url = {http://booksandjournals.brillonline.com/content/10.1163/187847612x647982}, event_place = {Oxford, UK}, event_name = {13th International Multisensory Research Forum (IMRF 2012)}, DOI = {10.1163/187847612X647982}, author = {Adam, R and Noppeney, U} } @Conference { NoppeneyASMLWOLC2012, title = {Different classes of audiovisual correspondences are processed at distinct levels of the cortical hierarchy}, journal = {Seeing and Perceiving}, year = {2012}, month = {6}, day = {20}, volume = {25}, pages = {69}, abstract = {The brain should integrate sensory inputs only when they emanate from a common source and segregate those from different sources. Sensory correspondences are important cues informing the brain whether two sensory inputs are generated by a common event and should hence be integrated. Most prominently, sensory inputs should co-occur in time and space. More complex audiovisual stimuli may also be congruent in terms of semantics (e.g., objects and source sounds) or phonology (e.g., spoken and written words; linked via common linguistic labels). Surprisingly, metaphoric relations (e.g., pitch and height) have also been shown to influence audiovisual integration. The neural mechanisms that mediate these metaphoric congruency effects are only poorly understood. They may be mediated via (i) natural multisensory binding, (ii) common linguistic labels or (iii) semantics. In this talk, we will present a series of studies that investigate whether these different types of audiovisual correspondences are processed by distinct neural systems. Further, we investigate how those systems are employed by metaphoric audiovisual correspondences. Our results demonstrate that different classes of audiovisual correspondences influence multisensory integration at distinct levels of the cortical hierarchy. Spatiotemporal incongruency is detected already at the primary cortical level. Natural (e.g., motion direction) and phonological incongruency influences MSI in areas involved in motion or phonological processing. Critically, metaphoric interactions emerge in neural systems that are shared with natural and semantic incongruency. This activation pattern may reflect the ambivalent nature of metaphoric audiovisual interactions relying on both natural and semantic correspondences.}, department = {Research Group Noppeney}, department2 = {Department B{\"u}lthoff}, talk_type = {Abstract Talk}, web_url = {http://booksandjournals.brillonline.com/content/10.1163/187847612x646901}, event_place = {Oxford, UK}, event_name = {13th International Multisensory Research Forum (IMRF 2012)}, DOI = {10.1163/187847612X646901}, author = {Noppeney, U and Adam, R and Sadaghiani, S and Maier, JX and Lee, HL and Werner, S and Ostwald, D and Lewis, R and Conrad, V} } @Conference { ConradVN2012_2, title = {Interactions between apparent motion rivalry in vision and touch}, journal = {Seeing and Perceiving}, year = {2012}, month = {6}, day = {19}, volume = {25}, pages = {26-27}, abstract = {Introduction In multistable perception, the brain alternates between several perceptual explanations of ambiguous sensory signals. Recent studies have demonstrated crossmodal interactions between ambiguous and unambiguous signals. 
However it is currently unknown whether multiple bistable processes can interact across the senses. [1, 2] Using the apparent motion quartet in vision and touch, this study investigated whether bistable perceptual processes for vision and touch are independent or influence each other when powerful cues of congruency are provided to facilitate visuotactile integration. [3] Methods When two visual flashes and/or tactile vibration pulses are presented alternately along the two diagonals of the rectangle, subjects’ percept vacillates between vertical and horizontal apparent motion in the visual and/or tactile modalities [4]. Observers were presented with unisensory (visual/tactile), visuotactile spatially congruent and incongruent apparent motion quartets and reported their visual or tactile percepts. Results Congruent stimulation induced pronounced visuotactile interactions as indicated by increased dominance times and \%-bias for the percept already dominant under unisensory stimulation. Yet, the temporal dynamics did not converge for congruent stimulation. It depended also on subjects’ attentional focus and was generally slower for tactile than visual reports. Conclusion Our results support Bayesian approaches to perceptual inference, where the probability of a perceptual interpretation is determined by combining a modality-specific prior with incoming visual and/or tactile evidence. Under congruent stimulation, joint evidence from both senses decelerates the rivalry dynamics by stabilizing the more likely perceptual interpretation. Importantly, the perceptual stabilization was specific to spatiotemporally congruent visuotactile stimulation indicating multisensory rather than cognitive bias mechanisms.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, department3 = {Research Group Ernst}, talk_type = {Abstract Talk}, web_url = {http://booksandjournals.brillonline.com/content/10.1163/187847612x646497}, event_place = {Oxford, UK}, event_name = {13th International Multisensory Research Forum (IMRF 2012)}, DOI = {10.1163/187847612X646497}, author = {Conrad, V and Vitello, MP and Noppeney, U} } @Conference { Noppeney2011_2, title = {Multisensory integration: From human behavior to neural systems}, year = {2011}, month = {12}, day = {10}, abstract = {To interact effectively with our environment, the human brain integrates information from multiple senses. While multisensory integration was traditionally assumed to be deferred until later processing stages in higher order association cortices, more recent studies have revealed multisensory integration even in putatively unisensory cortical areas. Given this multitude of multisensory integration sites, characterizing their functional similarities and differences is of critical importance. Combining functional imaging (fMRI), effective connectivity analyses and psychophysics in humans, our studies highlight three main aspects: First, the locus of multisensory integration depends on the type of information being integrated and the specific relationship between the auditory and visual signals. Second, in terms of functional brain architectures, effective connectivity analyses suggested that audiovisual interactions in low level sensory areas are mediated by multiple mechanisms including feedforward thalamocortical, direct connections between sensory areas and top down influences from higher order association areas. 
Third, the regional response profile and activation patterns depend on the relative reliability of the unisensory signals. Paralleling behavioural indices of multisensory integration, multivariate pattern analyses revealed that multisensory integration increased the discriminability and hence reliability of multisensory representations already at the primary cortical level. From the macroscopic perspective of regional BOLD signals, our data provide further evidence for ‘Bayesian-ish’ integration of signals from multiple senses.}, department = {Research Group Noppeney}, talk_type = {Invited Lecture}, web_url = {http://www.bccn-tuebingen.de/events/bernstein-symposium-series-2011/symposium-c/talks-and-abstracts.html}, event_place = {T{\"u}bingen, Germany}, event_name = {Bernstein Symposium ''Bayesian Inference: From Spikes to Behaviour''}, author = {Noppeney, U} } @Conference { LeoN2011_2, title = {Monetary conditioning influences audio-visual integration by increasing sound saliency}, year = {2011}, month = {11}, day = {18}, pages = {7}, abstract = {We investigated the effect of prior conditioning an auditory stimulus on audiovisual integration in a series of three psychophysical experiments. The three experiments employed the same acquisition phase (monetary conditioning) but different multisensory paradigms (Exp1: 2AFC visual detection; Exp2: redundant target paradigm; Exp3: redundant target paradigm using near threshold visual and auditory stimuli). In the acquisition phase, subjects were presented with three pure tones (= conditioned stimuli, CSs) that were paired with positive, negative or neutral unconditioned stimuli (USs, monetary: +50 euro cents, -50 cents, 0 cents). In a 2AFC visual selective attention paradigm, detection of near-threshold Gabors was improved by concurrent sounds that had previously been paired with a positive outcome relative to neutral sounds. Taken together, redundant target paradigms results showed that sounds previously paired with positive or negative outcomes increased response speed to both auditory and audiovisual targets similarly. Importantly, prior conditioning did not increase the multisensory response facilitation (i.e. (A+V)/2-AV) or the race model violation. Collectively, our results suggest that prior conditioning primarily increases the saliency of the auditory stimulus per se rather than influencing audiovisual integration directly. 
In turn, conditioned sounds are rendered more potent for increasing response accuracy or speed in detection of visual targets.}, department = {Research Group Noppeney}, department2 = {Department B{\"u}lthoff}, talk_type = {Abstract Talk}, web_url = {http://www.sinp-web.org/site/}, institution = {Max Planck Institute for Biological Cybernetics}, event_place = {Bologna, Italy}, event_name = {Riunione Autunnale di Societ{\`a} Italiana di Neuropsicologia (SINP 2011)}, author = {Leo, F and Noppeney, U} } @Conference { Noppeney2010, title = {Audiovisual integration within the cortical hierarchy: neural mechanisms and functional relevance}, year = {2010}, month = {10}, day = {29}, department = {Research Group Noppeney}, talk_type = {Invited Lecture}, web_url = {http://www.cin.uni-tuebingen.de/news-events/browse-all-events/detail/view/338/page/2/conference-2nd-cin-retreat-2010.html}, event_place = {Reutlingen, Germany}, event_name = {2nd CIN Retreat}, author = {Noppeney, U} } @Conference { AdamN2009, title = {Auditory influence on visual object categorization: an fMRI study}, journal = {Journal of Molecular Neuroscience}, year = {2009}, month = {11}, day = {23}, volume = {39}, number = {Supplement 1}, pages = {S4-S5}, abstract = {In our daily life, we are often confronted with objects that give rise to signals in multiple sensory modalities. The brain’s challenge is to integrate information from multiple senses into a unified percept. Combining psychophysics and fMRI, the present study investigates how task-irrelevant object source sounds affect category-selective activations for visual faces in the fusiform and landmarks in the parahippocampal gyri. In a visual selective attention paradigm, subjects categorized degraded object pictures as landmarks or faces while ignoring intact object source sounds that were either semantically congruent or incongruent. The 2X2 factorial design manipulated (i) Visual category: Animal vs. Landmark, and (ii) Auditory category: Animal vocalization vs. Sound associated with Landmark. Behaviorally, incongruent trials were associated with longer response times. This effect emerged due to an interference of incongruent sounds associated with landmarks on the categorization of visual faces. At the neural level, only landmark-selective activation in the parahippocampal gyrus, but not face selective activations in the fusiform gyrus were modulated by the congruency of an irrelevant object sound. More specifically, activations in the anterior parahippocampal gyrus showed additive effects of both visual and auditory category information. Effective connectivity analysis indicated that parahippocampal responses are amplified by incongruent auditory inputs via enhanced coupling between auditory and occipito-temporal cortices. Collectively, these results suggest that a region in the parahippocampal gyrus integrates information about object categories from multiple senses. 
In contrast, face-selective responses in fusiform gyrus are more robust and less influenced by task-irrelevant sounds, even in the context of a behavioural audiovisual interference effect.}, department = {Research Group Noppeney}, talk_type = {Abstract Talk}, web_url = {http://link.springer.com/content/pdf/10.1007\%2Fs12031-009-9309-1.pdf}, event_place = {Eilat, Israel}, event_name = {18th Annual Meeting of the Israel Society for Neuroscience (ISFN 2009)}, DOI = {10.1007/s12031-009-9309-1}, author = {Adam, R and Noppeney, U} } @Conference { 5951, title = {Inverse effectiveness in BOLD-response and its behavioural relevance in object categorization}, year = {2009}, month = {7}, volume = {10}, pages = {395}, abstract = {Inverse effectiveness has been invoked as a principle to describe synergistic effects of multisensory integration in neuronal and behavioural responses as a function of stimulus properties (e.g. intensity) or efficacy. We characterized ‘inverse effectiveness’ and its behavioural relevance at the macroscopic level, as provided by the fMRI BOLD-response, based on (1) stimulus-induced and (2) intrinsic response variability across voxels or subjects during object categorization. Subjects categorized audiovisual object stimuli with the relative informativeness (i.e. degradation) of the auditory and visual inputs being manipulated factorially. Controlling for low-level integration processes, higher-level audiovisual integration was observed selectively in the superior temporal sulci (STS) bilaterally. (1) Consistent with the law of inverse effectiveness, auditory and visual informativeness determined the operational modes of audiovisual integration in STS similarly to the influence of physical stimulus intensity in the superior colliculus: while multisensory interactions were primarily subadditive and even suppressive for intact stimuli, additive effects were observed for degraded, near threshold stimuli. (2) Exploiting intrinsic variability across voxels and/or subjects, we demonstrate that superadditivity for audiovisual stimuli increases with decreasing unimodal responses. This inverse relationship could be explained by inherent statistical dependencies between superadditive and unimodal responses. Nevertheless, the superadditive responses in STS (and only in this region) were related to subjects’ audiovisual behavioral benefit: only subjects that benefited from multisensory integration exhibited superadditive interactions, while those that did not benefit showed suppressive interactions. In conclusion, the (super)additive and subadditive integration modes in STS are functionally relevant and related to behavioral indices of multisensory integration with superadditive interactions mediating successful audiovisual object categorization.
We argue that inverse effectiveness trends in neuronal and behavioural responses may be intimately related and mutually predictive.}, department = {Research Group Noppeney}, talk_type = {Abstract Talk}, web_url = {http://imrf.mcmaster.ca/IMRF/ocs/index.php/meetings/2009/paper/view/897}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {New York, NY, USA}, event_name = {10th International Multisensory Research Forum (IMRF 2009)}, language = {en}, author = {Noppeney, U and Werner, S} } @Conference { 5264, title = {The prefrontal cortex accumulates object evidence through differential connectivity to the visual and auditory cortices}, year = {2008}, month = {7}, volume = {9}, number = {189}, pages = {118}, abstract = {To form categorical decisions about objects in our environment, the human brain accumulates noisy sensory information over time until a decisional threshold is reached. Combining fMRI and Dynamic Causal Modelling (DCM), we investigated how the brain accumulates evidence from the auditory and visual senses through distinct interactions amongst brain regions. In a visual selective attention paradigm, subjects categorized visual action movies while ignoring their accompanying soundtracks that were semantically congruent or incongruent. Both auditory and visual information could be intact or degraded. Reaction times as a marker for the time to decisional threshold accorded with random walk models of decision making. At the neural level, incongruent auditory sounds induced amplification of the task-relevant visual information in the occipito-temporal cortex. Importantly, only the left inferior frontal sulcus (IFS) showed an activation pattern of an accumulator region, i.e. (i) positive reaction-time effects and (ii) incongruency effects that were increased for unreliable (=degraded) visual and interfering reliable (=intact) auditory information, which, based on our DCM analysis, were mediated by increased forward connectivity from visual regions.
Thus, to form interpretations and decisions that guide behavioural responses, the IFS may accumulate multi-sensory evidence over time through dynamic weighting of its connectivity to auditory and visual regions.}, department = {Research Group Noppeney}, talk_type = {Abstract Talk}, web_url = {http://imrf.mcmaster.ca/IMRF/2008/pdf/FullProgramIMRF08.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, institution = {Max Planck Institute for Biological Cybernetics}, event_place = {Hamburg, Germany}, event_name = {9th International Multisensory Research Forum (IMRF 2008)}, language = {en}, author = {Noppeney, U and Ostwald, D and Werner, S and Kleiner, M} } @Conference { 5266, title = {How priming enables us to understand speech in an impoverished context}, journal = {NeuroImage}, year = {2008}, month = {6}, volume = {41}, number = {Supplement 1}, pages = {S156}, department = {Research Group Noppeney}, talk_type = {Abstract Talk}, web_url = {http://www.sciencedirect.com/science/article/pii/S1053811908003133}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {Melbourne, Australia}, event_name = {14th Annual Meeting of the Organization for Human Brain Mapping (HBM 2008)}, DOI = {10.1016/j.neuroimage.2008.04.008}, author = {T{\"u}nnerhoff, J and Noppeney, U} } @Conference { 5079, title = {Audio-visual interactions in perception and response selection}, year = {2008}, month = {3}, day = {4}, volume = {50}, pages = {69}, abstract = {Both physical and physiological transmission times can differ between audition and vision. Under certain conditions, the brain reduces perceived asynchrony by adapting to this temporal discrepancy. In two experiments we investigated whether this recalibration is specific to auditory and visual stimuli, or whether other modality combinations (audiotactile, visuotactile) are affected, as well. We presented asynchronous audiovisual signals, with either auditory leading or visual leading. Then, using temporal order judgments we measured observers point of subjective simultaneity for three modality combinations. Results indicate an adjustment of perceived simultaneity for the audiovisual and the visuotactile modality pairs. We conclude that audiovisual adaptation is the result of a change of processing latencies of visual events. In a second experiment, we corroborate this finding. We demonstrate that reaction times to visual signals, but not to tactile or auditory signals, change as a result of audiovisual recalibration.}, department = {Research Group Noppeney}, talk_type = {Abstract Talk}, web_url = {https://www.teap.de/memory/Abstractband_50_2008_marburg.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {Marburg, Germany}, event_name = {50. Tagung Experimentell Arbeitender Psychologen (TeaP 2008)}, author = {Werner, S and Noppeney, U} } @Conference { 5378, title = {Naturalistic, metaphoric and linguistic auditory-visual interactions}, year = {2007}, month = {11}, volume = {37}, number = {662.8}, abstract = {To form a coherent percept of our dynamic environment, the brain merges motion information from the auditory and visual senses. Yet, not only ‘naturalistic’ direction information of auditory motion, but also ‘metaphoric’ direction information of dynamic pitch has been shown to influence visual motion discrimination. 
Here, we systematically investigate the neural systems that mediate auditory influences on visual motion discrimination in naturalistic, metaphoric and linguistic contexts. In a visual selective attention paradigm, subjects discriminated the direction of visual motion at several levels of ambiguity, while ignoring a simultaneous auditory stimulus that could be either congruent, absent or incongruent. Audio-visual congruency was defined at the 1) naturalistic, 2) metaphoric and 3) linguistic levels using three classes of auditory stimuli: 1) MOTION: leftward- vs. rightward-moving white noise, 2) PITCH: rising vs. falling pitch and 3) SPEECH: spoken German words denoting directions, e.g. ‘links’ vs. ‘rechts’ (‘left’ vs. ‘right’). At the behavioral level, all three classes of auditory stimuli induced a directional bias. Furthermore, this bias was not significantly different across contexts. At the neural level, the auditory influence on visual motion processing was identified through (1) the interaction between visual ambiguity and audition (presence vs. absence) and (2) the incongruency effect, separately for MOTION, PITCH and SPEECH. A significant interaction was revealed for MOTION in left hMT+/V5 and for SPEECH in right intraparietal sulcus. An incongruency effect was only observed for SPEECH in the left superior temporal gyrus and right middle frontal gyrus. Direct comparisons across contexts confirmed this functional dissociation: the interaction effect gradually decreased in left hMT+/V5 for MOTION > PITCH > SPEECH and in right IPS for SPEECH > PITCH > MOTION. Our results suggest that audition can influence visual motion discrimination at the naturalistic, metaphoric and linguistic levels. Yet, even though the auditory bias was comparable across contexts, our functional imaging results suggest that these influences are mediated by different neural systems. While naturalistic influences emerge in motion-processing areas, linguistic interactions are revealed primarily in higher-level fronto-parietal regions.}, department = {Research Group Noppeney}, talk_type = {Abstract Talk}, web_url = {http://www.sfn.org/am2007/}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {San Diego, CA, USA}, event_name = {37th Annual Meeting of the Society for Neuroscience (Neuroscience 2007)}, language = {en}, author = {Sadaghiani, S and Noppeney, U} } @Conference { Noppeney2007, title = {Audio-Visual Interactions within the Cortical Hierarchy}, journal = {Neural Plasticity}, year = {2007}, month = {9}, day = {19}, volume = {2007}, number = {23250}, pages = {29}, abstract = {To interact effectively with our environment, the human brain integrates information from multiple senses into a coherent percept. Neurophysiological and functional imaging studies have revealed multi-sensory interactions in a widespread neural system encompassing subcortical structures, putative ‘unisensory’ and higher-order association cortices. Combining fMRI and psychophysics, we investigated where and how different types of sensory features are combined within the cortical hierarchy. We presented subjects with object pictures and sounds while factorially manipulating the relative informativeness of the auditory and visual modalities. While low-level spatio-temporal interactions were found within Heschl’s gyrus, higher-order object features were integrated within the superior temporal sulci (STS) bilaterally.
Consistent with the law of inverse effectiveness, the multisensory interactions in STS were primarily suppressive for intact stimuli but (super)additive for degraded stimuli. These distinct modes paralleled behavioral indices of multi-sensory enhancement, showing the greatest multisensory benefit for degraded stimuli. In conclusion, the human brain integrates information that is abstracted from its sensory inputs at multiple levels of the cortical hierarchy. The operational mode of audio-visual integration is dictated by the informativeness of the auditory and visual modalities.}, department = {Department B{\"u}lthoff}, department2 = {Research Group Noppeney}, talk_type = {Abstract Talk}, web_url = {http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2366049/}, event_place = {Trieste, Italy}, event_name = {39th Annual General Meeting of the European Brain and Behaviour Society (EBBS 2007)}, DOI = {10.1155/2007/23250}, author = {Noppeney, U} } @Conference { 4607, title = {Neural systems involved in visual-tactile integration of shape information}, year = {2007}, month = {6}, pages = {3}, abstract = {The brain integrates multisensory information to create a coherent and more reliable perceptual estimate of the environment. This multisensory estimate is a linear combination of the individual unimodal estimates that are weighted by their relative reliabilities (e.g., Ernst and Banks, Nature, 2002). Here we explored the neural substrates underlying visual-tactile integration in shape processing. To identify multisensory integration sites, we correlated behavioural data with neural activity evoked by multisensory integration. Observers were presented with elliptical shapes that they could see and/or touch. Observers' task was to judge the shape of the ellipse. Introducing conflicts between seen and felt shape allowed us to examine whether participants relied more on visual or tactile information (relative weight of vision and touch). To manipulate the weight attributed to vision, we degraded visual information. We observed a decrease in visual weight when vision was degraded and thus became less reliable. Discrimination performance increased when both modalities were presented together, indicating that visual and tactile shape information is indeed fused. The BOLD response bilaterally in the anterior IPS was modulated by visual input. The change in BOLD signal in these areas correlated with the cue weights, suggesting that this activity reflects the relative weighting of vision and touch.}, url = {http://www.kyb.tuebingen.mpg.de/fileadmin/user_upload/files/publications/Psychologie_und_Gehirn_2007-Helbig.pdf}, department = {Department B{\"u}lthoff}, department2 = {Research Group Ernst}, department3 = {Research Group Noppeney}, talk_type = {Abstract Talk}, web_url = {https://eldorado.tu-dortmund.de/bitstream/2003/24421/1/Psychologie_und_Gehirn_2007.pdf}, institute = {Biologische Kybernetik}, organization = {Max-Planck-Gesellschaft}, event_place = {Dortmund, Germany}, event_name = {33. Tagung Psychologie und Gehirn (PuG 2007)}, language = {en}, author = {Helbig, HB and Noppeney, U and Ernst, M} }
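The abstract of entry 4607 above describes the multisensory estimate as a linear combination of the unimodal estimates weighted by their relative reliabilities (Ernst and Banks, 2002). As an illustrative sketch of that weighting rule, not quoted from the cited work, assume independent Gaussian noise on the visual and tactile shape estimates $\hat{S}_V$ and $\hat{S}_T$ with variances $\sigma_V^2$ and $\sigma_T^2$; the maximum-likelihood combination is then

\begin{align}
  \hat{S}_{VT} &= w_V\,\hat{S}_V + w_T\,\hat{S}_T,
  \qquad w_V = \frac{1/\sigma_V^2}{1/\sigma_V^2 + 1/\sigma_T^2}, \quad w_T = 1 - w_V,\\
  \sigma_{VT}^2 &= \frac{\sigma_V^2\,\sigma_T^2}{\sigma_V^2 + \sigma_T^2} \;\le\; \min\!\left(\sigma_V^2,\, \sigma_T^2\right).
\end{align}

Under this sketch, degrading the visual stimulus increases $\sigma_V^2$ and therefore lowers $w_V$, matching the reported decrease in visual weight when vision was made less reliable, while the reduced combined variance $\sigma_{VT}^2$ corresponds to the improved bimodal discrimination performance.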