@Article{ PerrodinKALP2015, title = {Who is That? Brain Networks and Mechanisms for Identifying Individuals}, journal = {Trends in Cognitive Sciences}, year = {2015}, month = {12}, volume = {19}, number = {12}, pages = {783–796}, abstract = {Social animals can identify conspecifics by many forms of sensory input. However, whether the neuronal computations that support this ability to identify individuals rely on modality-independent convergence or involve ongoing synergistic interactions along the multiple sensory streams remains controversial. Direct neuronal measurements at relevant brain sites could address such questions, but this requires better bridging the work in humans and animal models. Here, we overview recent studies in nonhuman primates on voice and face identity-sensitive pathways and evaluate the correspondences to relevant findings in humans. This synthesis provides insights into converging sensory streams in the primate anterior temporal lobe (ATL) for identity processing. Furthermore, we advance a model and suggest how alternative neuronal mechanisms could be tested.}, web_url = {http://www.sciencedirect.com/science/article/pii/S1364661315002260}, state = {published}, DOI = {10.1016/j.tics.2015.09.002}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}; Abel TJ; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Article{ PetkovKMMRL2015, title = {Different forms of effective connectivity in primate frontotemporal pathways}, journal = {Nature Communications}, year = {2015}, month = {1}, volume = {6}, number = {6000}, pages = {1-12}, abstract = {It is generally held that non-primary sensory regions of the brain have a strong impact on frontal cortex. However, the effective connectivity of pathways to frontal cortex is poorly understood. Here we microstimulate sites in the superior temporal and ventral frontal cortex of monkeys and use functional magnetic resonance imaging to evaluate the functional activity resulting from the stimulation of interconnected regions. Surprisingly, we find that, although certain earlier stages of auditory cortical processing can strongly activate frontal cortex, downstream auditory regions, such as voice-sensitive cortex, appear to functionally engage primarily an ipsilateral temporal lobe network. Stimulating other sites within this activated temporal lobe network shows strong activation of frontal cortex. The results indicate that the relative stage of sensory processing does not predict the level of functional access to the frontal lobes. 
Rather, certain brain regions engage local networks, only parts of which have a strong functional impact on frontal cortex.}, web_url = {http://www.nature.com/ncomms/2015/150123/ncomms7000/pdf/ncomms7000.pdf}, state = {published}, DOI = {10.1038/ncomms7000}, author = {Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Kikuchi Y; Milne AE; Mishkin M; Rauschecker JP; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Article{ PerrodinKLP2014_2, title = {Natural asynchronies in audiovisual communication signals regulate neuronal multisensory interactions in voice-sensitive cortex}, journal = {Proceedings of the National Academy of Sciences of the United States of America}, year = {2015}, month = {1}, volume = {112}, number = {1}, pages = {273–278}, abstract = {When social animals communicate, the onset of informative content in one modality varies considerably relative to the other, such as when visual orofacial movements precede a vocalization. These naturally occurring asynchronies do not disrupt intelligibility or perceptual coherence. However, they occur on time scales where they likely affect integrative neuronal activity in ways that have remained unclear, especially for hierarchically downstream regions in which neurons exhibit temporally imprecise but highly selective responses to communication signals. To address this, we exploited naturally occurring face- and voice-onset asynchronies in primate vocalizations. Using these as stimuli we recorded cortical oscillations and neuronal spiking responses from functional MRI (fMRI)-localized voice-sensitive cortex in the anterior temporal lobe of macaques. We show that the onset of the visual face stimulus resets the phase of low-frequency oscillations, and that the face–voice asynchrony affects the prominence of two key types of neuronal multisensory responses: enhancement or suppression. Our findings show a three-way association between temporal delays in audiovisual communication signals, phase-resetting of ongoing oscillations, and the sign of multisensory responses. The results reveal how natural onset asynchronies in cross-sensory inputs regulate network oscillations and neuronal excitability in the voice-sensitive cortex of macaques, a suggested animal model for human voice areas. These findings also advance predictions on the impact of multisensory input on neuronal processes in face areas and other brain regions.}, web_url = {http://www.pnas.org/content/112/1/273.full.pdf+html}, state = {published}, DOI = {10.1073/pnas.1412817112}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Article{ PerrodinKPL2013, title = {Auditory and Visual Modulation of Temporal Lobe Neurons in Voice-Sensitive and Association Cortices}, journal = {Journal of Neuroscience}, year = {2014}, month = {2}, volume = {34}, number = {7}, pages = {2524-2537}, abstract = {Effective interactions between conspecific individuals can depend upon the receiver forming a coherent multisensory representation of communication signals, such as merging voice and face content. Neuroimaging studies have identified face- or voice-sensitive areas (Belin et al., 2000; Petkov et al., 2008; Tsao et al., 2008), some of which have been proposed as candidate regions for face and voice integration (von Kriegstein et al., 2005). 
However, it was unclear how multisensory influences occur at the neuronal level within voice- or face-sensitive regions, especially compared with classically defined multisensory regions in temporal association cortex (Stein and Stanford, 2008). Here, we characterize auditory (voice) and visual (face) influences on neuronal responses in a right-hemisphere voice-sensitive region in the anterior supratemporal plane (STP) of Rhesus macaques. These results were compared with those in the neighboring superior temporal sulcus (STS). Within the STP, our results show auditory sensitivity to several vocal features, which was not evident in STS units. We also newly identify a functionally distinct neuronal subpopulation in the STP that appears to carry the area's sensitivity to voice identity related features. Audiovisual interactions were prominent in both the STP and STS. However, visual influences modulated the responses of STS neurons with greater specificity and were more often associated with congruent voice-face stimulus pairings than STP neurons. Together, the results reveal the neuronal processes subserving voice-sensitive fMRI activity patterns in primates, generate hypotheses for testing in the visual modality, and clarify the position of voice-sensitive areas within the unisensory and multisensory processing hierarchies.}, web_url = {http://www.jneurosci.org/content/34/7/2524.full.pdf+html}, state = {published}, DOI = {10.1523/JNEUROSCI.2805-13.2014}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Article{ PerrodinKLP2011, title = {Voice Cells in the Primate Temporal Lobe}, journal = {Current Biology}, year = {2011}, month = {8}, volume = {21}, number = {16}, pages = {1408-1415}, abstract = {Communication signals are important for social interactions and survival and are thought to receive specialized processing in the visual and auditory systems. Whereas the neural processing of faces by face clusters and face cells has been repeatedly studied [1,2,3,4,5], less is known about the neural representation of voice content. Recent functional magnetic resonance imaging (fMRI) studies have localized voice-preferring regions in the primate temporal lobe [6,7], but the hemodynamic response cannot directly assess neurophysiological properties. We investigated the responses of neurons in an fMRI-identified voice cluster in awake monkeys, and here we provide the first systematic evidence for voice cells. “Voice cells” were identified, in analogy to “face cells,” as neurons responding at least 2-fold stronger to conspecific voices than to “nonvoice” sounds or heterospecific voices. Importantly, whereas face clusters are thought to contain high proportions of face cells [4] responding broadly to many faces [1,2,4,5,8,9,10], we found that voice clusters contain moderate proportions of voice cells. Furthermore, individual voice cells exhibit high stimulus selectivity. 
The results reveal the neurophysiological bases for fMRI-defined voice clusters in the primate brain and highlight potential differences in how the auditory and visual systems generate selective representations of communication signals.}, file_url = {fileadmin/user_upload/files/publications/2011/Perrodin_Voice_cells_CB_2011.pdf}, web_url = {http://www.sciencedirect.com/science/article/pii/S0960982211008293}, state = {published}, DOI = {10.1016/j.cub.2011.07.028}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov C{chrisp}{Department Physiology of Cognitive Processes}} } @Article{ 5641, title = {Multisensory interactions in primate auditory cortex: fMRI and electrophysiology}, journal = {Hearing Research}, year = {2009}, month = {12}, volume = {258}, number = {1-2}, pages = {80-88}, abstract = {Recent studies suggest that cross-modal integration does not only occur in higher association cortices but also in early stages of auditory processing, possibly in primary or secondary auditory cortex. Support for such early cross-modal influences comes from functional magnetic resonance imaging experiments in humans and monkeys. However we argue that the current understanding of neurovascular coupling and of the neuronal basis underlying the imaging signal does not permit the direct extrapolation from imaging data to properties of neurons in the same region. While imaging can guide subsequent electrophysiological studies, only these can determine whether and how neurons in auditory cortices combine information from multiple modalities. Indeed, electrophysiological studies only partly confirm the findings from imaging studies. While recordings of field potentials reveal strong influences of visual or somatosensory stimulation on synaptic activity even in primary auditory cortex, single unit studies find only a small minority of neurons as being influenced by non-acoustic stimuli. We propose the analysis of the information coding properties of individual neurons as one way to quantitatively determine whether the representation of our acoustic environment in (primary) auditory cortex indeed benefits from multisensory input.}, web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6T73-4VSB18D-2-C&_cdi=5047&_user=29041&_orig=search&_coverDate=03%2F06%2F2009&_sk=999999999&view=c&wchp=dGLbVlW-zSkzk&md5=c2454278adededccb8c1313ff960fd5c&ie=}, state = {published}, DOI = {10.1016/j.heares.2009.02.011}, author = {Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Article{ 5826, title = {Optimizing the imaging of the monkey auditory cortex: sparse vs. continuous fMRI}, journal = {Magnetic Resonance Imaging}, year = {2009}, month = {10}, volume = {27}, number = {8}, pages = {1065-1073}, abstract = {The noninvasive imaging of the monkey auditory system with functional magnetic resonance imaging (fMRI) can bridge the gap between electrophysiological studies in monkeys and imaging studies in humans. 
Some of the recent imaging of monkey auditory cortical and subcortical structures relies on a technique of “sparse imaging,” which was developed in human studies to sidestep the negative influence of scanner noise by adding periods of silence in between volume acquisition. Among the various aspects that have gone into the ongoing optimization of fMRI of the monkey auditory cortex, replacing the more common continuous-imaging paradigm with sparse imaging seemed to us to make the most obvious difference in the amount of activity that we could reliably obtain from awake or anesthetized animals. Here, we directly compare the sparse- and continuous-imaging paradigms in anesthetized animals. We document a strikingly greater auditory response with sparse imaging, both quantitatively and qualitatively, which includes a more expansive and robust tonotopic organization. There were instances where continuous imaging could better reveal organizational properties that sparse imaging missed, such as aspects of the hierarchical organization of auditory cortex. We consider the choice of imaging paradigm as a key component in optimizing the fMRI of the monkey auditory cortex.}, file_url = {/fileadmin/user_upload/files/publications/Petkov%20-%20Sparse%20vs%20Continuous%20MRI%20-%20MRI%20-%2009_5826[0].pdf}, web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6T9D-4VT0H13-4-9&_cdi=5112&_user=29041&_orig=search&_coverDate=03%2F09%2F2009&_sk=999999999&view=c&wchp=dGLbVtb-zSkzS&md5=0d3cd98058c4dc88b34df154dd61d1b0&ie=}, state = {published}, DOI = {10.1016/j.mri.2009.01.018}, author = {Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Augath M{mark}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Article{ 5374, title = {Where are the human speech and voice regions, and do other animals have anything like them?}, journal = {Neuroscientist}, year = {2009}, month = {10}, volume = {15}, number = {5}, pages = {419-429}, abstract = {Modern lesion and imaging work in humans has been clarifying which brain regions are involved in the processing of speech and language. Concurrently, some of this work has aimed to bridge the gap to the seemingly incompatible evidence for multiple brain-processing pathways that first accumulated in nonhuman primates. For instance, the idea of a posterior temporal-parietal “Wernicke’s” territory, which is thought to be instrumental for speech comprehension, conflicts with this region of the brain belonging to a spatial “where” pathway. At the same time a posterior speech-comprehension region ignores the anterior temporal lobe and its “what” pathway for evaluating the complex features of sensory input. Recent language models confirm that the posterior or dorsal stream has an important role in human communication, by a re-conceptualization of the “where” into a “how-to” pathway with a connection to the motor system for speech comprehension. Others have tried to directly implicate the “what” pathway for speech comprehension, relying on the growing evidence in humans for anterior-temporal involvement in speech and voice processing. Coming full circle, we find that the recent imaging of vocalization and voice preferring regions in nonhuman primates allows us to make direct links to the human imaging data involving the anterior-temporal regions. 
We describe how comparisons of the structure and function of the vocal communication systems of human and nonhuman primates are clarifying the evolutionary relationships and the extent to which different species can model human brain function.}, web_url = {http://nro.sagepub.com/cgi/rapidpdf/1073858408326430v1}, state = {published}, DOI = {10.1177/1073858408326430}, author = {Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Obleser J} } @Article{ 4903, title = {Visual modulation of neurons in auditory cortex}, journal = {Cerebral Cortex}, year = {2008}, month = {7}, volume = {18}, number = {7}, pages = {1560-1574}, abstract = {Our brain integrates the information provided by the different sensory modalities into a coherent percept, and recent studies suggest that this process is not restricted to higher association areas. Here we evaluate the hypothesis that auditory cortical fields are involved in cross-modal processing by probing individual neurons for audiovisual interactions. We find that visual stimuli modulate auditory processing both at the level of field potentials and single-unit activity and already in primary and secondary auditory fields. These interactions strongly depend on a stimulus’ efficacy in driving the neurons but occur independently of stimulus category and for naturalistic as well as artificial stimuli. In addition, interactions are sensitive to the relative timing of audiovisual stimuli and are strongest when visual stimuli lead by 20–80 msec. Exploring the underlying mechanisms, we find that enhancement correlates with the resetting of slow (~10 Hz) oscillations to a phase angle of optimal excitability. These results demonstrate that visual stimuli can modulate the firing of neurons in auditory cortex in a manner that depends on stimulus efficacy and timing. These neurons thus meet the criteria for sensory integration and provide the auditory modality with multisensory contextual information about co-occurring environmental events.}, file_url = {/fileadmin/user_upload/files/publications/Kayser_CerCor_08_[0].pdf}, web_url = {http://cercor.oxfordjournals.org/cgi/reprint/18/7/1560}, state = {published}, DOI = {10.1093/cercor/bhm187}, author = {Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Article{ 4895, title = {Morphing rhesus monkey vocalizations}, journal = {Journal of Neuroscience Methods}, year = {2008}, month = {5}, volume = {170}, number = {1}, pages = {45-55}, abstract = {The capability to systematically morph between different types of animal vocalizations will give us insights into how the features of vocal sounds are perceived by listening individuals. Following behavioral study, neurophysiological recordings in nonhuman animals could reveal how neurons support the perception of communication signals. Signal processing algorithms are now available for creating sophisticated morphs between complex sounds, like human speech. However, most morphing approaches have been applied to harmonic sounds whose frequency components can be readily identified.
We show that auditory morphing can be more generally applied by describing a procedure for using the STRAIGHT signal processing package to gradually morph between: (1) vocalizations from different macaque monkeys, (2) acoustically dissimilar types of monkey vocalizations, such as a ‘coo’ and a ‘grunt’, and (3) monkey and human vocalizations. We then evaluated the quality of the morphs and obtained classification curves from human listeners who seemed to categorize the monkey vocalizations much like the ones produced by humans. The outlined procedures prepare macaque-monkey vocalizations for neuroethological study and the approach establishes basic principles that will assist in creating suitable morphs of other natural sounds and animal vocalizations.}, file_url = {/fileadmin/user_upload/files/publications/Chakladar%20-%20Morphing%20Monkey%20Vocalizations%20-%20JNMethods%20-%202008_4895[0].pdf}, web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6T04-4RM1KNS-2-S&_cdi=4852&_user=29041&_orig=search&_coverDate=05%2F15%2F2008&_sk=998299998&view=c&wchp=dGLbVlz-zSkWW&md5=996e3edcb35ab972100511d15d95b34a&ie=/sdarticle.pdf}, state = {published}, DOI = {10.1016/j.jneumeth.2007.12.023}, author = {Chakladar S{chakladar}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Article{ 4896, title = {A voice region in the monkey brain}, journal = {Nature Neuroscience}, year = {2008}, month = {3}, volume = {11}, number = {3}, pages = {367-374}, abstract = {For vocal animals, recognizing species-specific vocalizations is important for survival and social interactions. In humans, a voice region has been identified that is sensitive to human voices and vocalizations. As this region also strongly responds to speech, it is unclear whether it is tightly associated with linguistic processing and is thus unique to humans. Using functional magnetic resonance imaging of macaque monkeys (Old World primates, Macaca mulatta) we discovered a high-level auditory region that prefers species-specific vocalizations over other vocalizations and sounds. This region not only showed sensitivity to the ‘voice’ of the species, but also to the vocal identity of conspecific individuals. The monkey voice region is located on the superior-temporal plane and belongs to an anterior auditory ‘what’ pathway.
These results establish functional relationships with the human voice region and support the notion that, for different primate species, the anterior temporal regions of the brain are adapted for recognizing communication signals from conspecifics.}, file_url = {/fileadmin/user_upload/files/publications/Petkov%20-%20Voice%20Area%20-%20NatureNeuro%20-%202008_4896[0].pdf}, web_url = {http://www.nature.com/neuro/journal/v11/n3/pdf/nn2043.pdf}, state = {published}, DOI = {10.1038/nn2043}, author = {Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Steudel T{steudel}{Department Physiology of Cognitive Processes}; Whittingstall K{kevin}{Department Physiology of Cognitive Processes}; Augath M{mark}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Article{ 4589, title = {Tuning to sound frequency in auditory field potentials}, journal = {Journal of Neurophysiology}, year = {2007}, month = {9}, volume = {98}, number = {3}, pages = {1806-1809}, abstract = {Neurons in auditory cortex are selective for the frequency content of acoustical stimuli. Classically, this response selectivity is studied at the single neuron level. However, current research often employs functional imaging techniques to investigate the organization of auditory cortex. The signals underlying the imaging data arise from neural mass action and reflect the properties of populations of neurons. For example, the signal used for functional magnetic resonance imaging (fMRI-BOLD) was shown to correlate with the oscillatory activity quantified by local field potentials (LFP). This raises the question of how the frequency selectivity in neuronal population signals compares to the tuning of spiking responses. To address this, we quantified tuning properties of auditory evoked potentials (AEP), different frequency bands of the LFP, analog multi-unit (AMUA) and spike-sorted single- and multi-unit activity in auditory cortex. The AMUA showed a close correspondence in frequency tuning to the spike-sorted activity. In contrast, for the LFP we found a clear dissociation of high and low frequency bands: there was a gradual increase of tuning-curve similarity, tuning specificity and information about the stimulus with increasing LFP frequency. While properties of the high frequency LFP matched those of spiking activity, the lower frequency bands differed considerably, as did the AEP. These results demonstrate that electrophysiological population responses exhibit varying degrees of frequency tuning and suggest that those functional imaging methods that are related to high frequency oscillatory activity should well reflect the neuronal processing of sound frequency.}, file_url = {/fileadmin/user_upload/files/publications/Kayser_Jnp_07_4589[0].pdf}, web_url = {http://jn.physiology.org/cgi/reprint/98/3/1806}, state = {published}, DOI = {10.1152/jn.00358.2007}, author = {Kayser C{kayser}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Article{ 4372, title = {Encoding of illusory continuity in primary auditory cortex}, journal = {Neuron}, year = {2007}, month = {4}, volume = {54}, number = {1}, pages = {153-165}, abstract = {When interfering objects occlude a scene, the visual system restores the occluded information.
Similarly, when a sound of interest (a ‘foreground’ sound) is interrupted (occluded) by loud noise, the auditory system restores the occluded information. This process, called auditory induction, can be exploited to create a continuity illusion. When a segment of a foreground sound is deleted, and loud noise fills the missing portion, listeners incorrectly report hearing the foreground continuing through the noise. Here we reveal the neurophysiological underpinnings of illusory continuity in single neuron responses from awake macaque monkeys’ primary auditory cortex (A1). A1 neurons represented the missing segment of occluded tonal foregrounds by responding to discontinuous foregrounds interrupted by intense noise as if they were responding to the complete foregrounds. By comparison, simulated peripheral responses represented only the noise and not the occluded foreground. The results reveal that many A1 single neuron responses closely follow the illusory percept.}, file_url = {/fileadmin/user_upload/files/publications/Petkov%20-%20Continuity%20-%20Neuron%20-%202007_4372[0].pdf}, web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6WSS-4NDNGYC-J-2&_cdi=7054&_user=29041&_orig=browse&_coverDate=04%2F05%2F2007&_sk=999459998&view=c&wchp=dGLzVzz-zSkzV&md5=db7fec9f2a46ac02c086165545e53cb1&ie=/sdarticle.pdf}, state = {published}, DOI = {10.1016/j.neuron.2007.02.031}, author = {Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; O'Connor KN; Sutter ML} } @Article{ 4282, title = {Functional imaging reveals visual modulation of specific fields in auditory cortex}, journal = {Journal of Neuroscience}, year = {2007}, month = {2}, volume = {27}, number = {8}, pages = {1824-1835}, abstract = {Merging information from different senses is essential for successfully interacting with real world situations. Indeed, sensory integration can reduce perceptual ambiguity, speed reactions or change the qualitative sensory experience. It is widely held that integration occurs at later processing stages and mostly in higher association cortices. However, recent studies suggest that sensory convergence can already occur in primary sensory cortex. A good model for early convergence proved to be the auditory cortex, which can be modulated by visual and tactile stimulation. However, given the large number and small size of auditory fields, neither microelectrode recordings nor human imaging have systematically identified which fields are susceptible to multisensory influences. To reconcile findings from human imaging with anatomical knowledge from non-human primates, we exploited high-resolution imaging (fMRI) of the macaque monkey to study the modulation of auditory processing by visual stimulation. Using a functional parcellation of auditory cortex, we localized modulations to individual fields. Our results demonstrate that both primary (core) and non-primary auditory fields (belt) can be activated by mere presentation of visual scenes. Audio-visual convergence was restricted to caudal fields (prominently core field: A1, and belt fields CM, CL and MM) and continued in the auditory parabelt and the superior temporal sulcus. The same fields exhibited enhancement of auditory activation by visual stimulation and showed stronger enhancement for less effective stimuli, two characteristics of sensory integration.
Altogether, these findings reveal multisensory modulation of auditory processing prominently in caudal fields but also at the lowest stages of auditory cortical processing.}, file_url = {/fileadmin/user_upload/files/publications/Kayser_JNeurosci_07_4282[0].pdf}, web_url = {http://www.jneurosci.org/cgi/reprint/27/8/1824}, state = {published}, DOI = {10.1523/JNEUROSCI.4737-06.2007}, author = {Kayser C{kayser}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Augath M{mark}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Article{ 3967, title = {Functional Imaging Reveals Numerous Fields in the Monkey Auditory Cortex}, journal = {PLoS Biology}, year = {2006}, month = {7}, volume = {4}, number = {7}, pages = {1213-1226}, abstract = {Anatomical studies propose that the primate auditory cortex contains more fields than have actually been functionally confirmed or described. Spatially resolved functional magnetic resonance imaging (fMRI) with carefully designed acoustical stimulation could be ideally suited to extend our understanding of the processing within these fields. However, after numerous experiments in humans, many auditory fields remain poorly characterized. Imaging the macaque monkey is of particular interest as this species has a richer set of anatomical and neurophysiological data to clarify the source of the imaged activity. We functionally mapped the auditory cortex of behaving and of anesthetized macaque monkeys with high resolution fMRI. By optimizing our imaging and stimulation procedures, we obtained robust activity throughout auditory cortex using tonal and band-passed noise sounds. Then, by varying the frequency content of the sounds, spatially specific activity patterns were observed over this region. As a result, the activity patterns could be assigned to many auditory cortical fields, including those whose functional properties were previously undescribed. The results provide an extensive functional tessellation of the macaque auditory cortex and suggest that 11 fields contain neurons tuned for the frequency of sounds. This study provides functional support for a model where three fields in primary auditory cortex are surrounded by eight neighboring “belt” fields in non-primary auditory cortex. The findings can now guide neurophysiological recordings in the monkey to expand our understanding of the processing within these fields. Additionally, this work will improve fMRI investigations of the human auditory cortex.}, file_url = {/fileadmin/user_upload/files/publications/Petkov_PLOS_06_3967[0].pdf}, web_url = {http://biology.plosjournals.org/archive/1545-7885/4/7/pdf/10.1371_journal.pbio.0040215-S.pdf}, state = {published}, DOI = {10.1371/journal.pbio.0040215}, EPUB = {e215}, author = {Petkov C{chrisp}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}; Augath M{mark}{Department Physiology of Cognitive Processes}; Logothetis N{nikos}{Department Physiology of Cognitive Processes}} } @Article{ 3537, title = {Adaptive Stimulus Optimization for Auditory Cortical Neurons}, journal = {Journal of Neurophysiology}, year = {2005}, month = {12}, volume = {94}, number = {6}, pages = {4051-4067}, abstract = {Despite the extensive physiological work performed on auditory cortex, our understanding of the basic functional properties of auditory cortical neurons is incomplete.
For example, it remains unclear what stimulus features are most important for these cells. Determining these features is challenging given the considerable size of the relevant stimulus parameter space as well as the unpredictable nature of many neurons' responses to complex stimuli due to nonlinear integration across frequency. Here we used an adaptive stimulus optimization technique to obtain the preferred spectral input for neurons in macaque primary auditory cortex (AI). This method uses a neuron's response to progressively modify the frequency composition of a stimulus to determine the preferred spectrum. This technique has the advantage of being able to incorporate nonlinear stimulus interactions into a "best estimate" of a neuron's preferred spectrum. The resulting spectra displayed a consistent, relatively simple circumscribed form that was similar across scale and frequency in which excitation and inhibition appeared about equally prominent. In most cases, this structure could be described using two simple models, the Gabor and difference of Gaussians functions. The findings indicate that AI neurons are well suited for extracting important scale-invariant features in sound spectra and suggest that they are designed to efficiently represent natural sounds.}, file_url = {/fileadmin/user_upload/files/publications/Petkov%202005%20-%20Dyslexia%20-%20CBR_3537[0].pdf}, web_url = {http://jn.physiology.org/content/94/6/4051.full.pdf+html}, state = {published}, DOI = {10.1152/jn.00046.2005}, author = {O'Connor KN; Petkov CI{chrisp}; Sutter ML} } @Article{ 3562, title = {Mechanisms for allocating auditory attention: an auditory saliency map}, journal = {Current Biology}, year = {2005}, month = {11}, volume = {15}, number = {21}, pages = {1943-1947}, abstract = {Our nervous system is confronted with a barrage of sensory stimuli, but neural resources are limited and not all stimuli can be processed to the same extent. Mechanisms exist to bias attention towards the particularly salient events thereby providing a weighted representation of our environment [1]. Our understanding of these mechanisms is still limited, but theoretical models can replicate such a weighting of sensory inputs and provide a basis for understanding the underlying principles [2, 3]. Here we describe such a model for the auditory system – an auditory saliency map. We experimentally validate the model on natural acoustical scenarios demonstrating that it reproduces human judgments of auditory saliency and predicts the detectability of salient sounds embedded in noisy backgrounds. In addition, it also predicts the natural orienting behavior of naïve macaque monkeys to the same salient stimuli. The structure of the suggested model is identical to that of successfully used visual saliency maps. Hence we conclude that saliency is determined either by implementing similar mechanisms in different unisensory pathways, or by the same mechanism in multisensory areas.
In any case, our results demonstrate that different primate sensory systems rely on common principles for extracting relevant sensory events.}, file_url = {/fileadmin/user_upload/files/publications/Kayser_CurrentBiology_05_3562[0].pdf}, web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6VRT-4HH8DGG-X-B&_cdi=6243&_user=29041&_orig=search&_coverDate=11%2F08%2F2005&_sk=999849978&view=c&wchp=dGLbVtb-zSkzS&md5=803d2148009bc032de6a8539f2cc79b5&ie=}, state = {published}, DOI = {10.1016/j.cub.2005.09.040}, author = {Kayser C{kayser}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Lippert M{mlippert}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Article{ 3533, title = {Integration of touch and sound in auditory cortex}, journal = {Neuron}, year = {2005}, month = {10}, volume = {48}, number = {2}, pages = {373-384}, abstract = {To form a coherent percept of the environment, our brain combines information from different senses. Such multisensory integration occurs in higher association cortices, but supposedly it also occurs in early sensory areas. Confirming the latter hypothesis, we unequivocally demonstrate supra-additive integration of touch and sound stimulation at the second stage of the auditory cortex. Using high-resolution fMRI of the macaque monkey, we quantified the integration of auditory broad-band noise and tactile stimulation of hand and foot in anaesthetized animals. Integration was found posterior to and along the lateral side of the primary auditory cortex in the caudal auditory belt. Integration was stronger for temporally coincident stimuli and obeyed the principle of inverse effectiveness: greater enhancement for less effective stimuli. These findings demonstrate that multisensory integration occurs early and close to primary sensory areas, and – as it occurs in anaesthetized animals – suggest that this integration is mediated by pre-attentive bottom-up mechanisms.}, file_url = {/fileadmin/user_upload/files/publications/Kayser_Neuron_05_3533[0].pdf}, web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6WSS-4HC6GJV-V-3&_cdi=7054&_user=29041&_orig=search&_coverDate=10%2F20%2F2005&_sk=999519997&view=c&wchp=dGLbVlz-zSkzV&md5=2d8adb856c014d50807f6b8898864236&ie=/sdarticle.pdf}, state = {published}, DOI = {10.1016/j.neuron.2005.09.018}, author = {Kayser C{kayser}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Augath M{mark}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Article{ 3290, title = {Auditory perceptual grouping and attention in dyslexia}, journal = {Cognitive Brain Research}, year = {2005}, month = {4}, volume = {24}, number = {2}, pages = {343-354}, abstract = {Despite dyslexia affecting a large number of people, the mechanisms underlying the disorder remain undetermined. There are numerous theories about the origins of dyslexia. Many of these relate dyslexia to low-level, sensory temporal processing deficits. Another group of theories attributes dyslexia to language-specific impairments. Here we show that dyslexics perform worse than controls on an auditory perceptual grouping task. The results show differences in performance between the groups that depend on sound frequency and not solely on parameters related to temporal processing.
Performance on this task suggests that dyslexics’ deficits may result from impaired attentional control mechanisms. Such deficits are neither modality nor language specific and may help to reconcile differences between theories of dyslexia.}, web_url = {http://www.sciencedirect.com/science/article/pii/S0926641005000510}, state = {published}, DOI = {10.1016/j.cogbrainres.2005.02.021}, author = {Petkov C{chrisp}; O'Connor K; Benmoshe G; Baynes K; Sutter M} } @Article{ 3200, title = {Attentional modulation of human auditory cortex}, journal = {Nature Neuroscience}, year = {2004}, volume = {7}, number = {6}, pages = {658}, abstract = {Attention powerfully influences auditory perception, but little is understood about the mechanisms whereby attention sharpens responses to unattended sounds. We used high-resolution surface mapping techniques (using functional magnetic resonance imaging, fMRI) to examine activity in human auditory cortex during an intermodal selective attention task. Stimulus-dependent activations (SDAs), evoked by unattended sounds during demanding visual tasks, were maximal over mesial auditory cortex. They were tuned to sound frequency and location, and showed rapid adaptation to repeated sounds. Attention-related modulations (ARMs) were isolated as response enhancements that occurred when subjects performed pitch-discrimination tasks. In contrast to SDAs, ARMs were localized to lateral auditory cortex, showed broad frequency and location tuning, and increased in amplitude with sound repetition. The results suggest a functional dichotomy of auditory cortical fields: stimulus-determined mesial fields that faithfully transmit acoustic information, and attentionally labile lateral fields that analyze acoustic features of behaviorally relevant sounds.}, file_url = {/fileadmin/user_upload/files/publications/pdf3200.pdf}, state = {published}, author = {Petkov CI{chrisp}; Kang X; Alho K; Bertrand O; Yund EW; Woods DL} } @Article{ 3202, title = {Correlates of memory function in community-dwelling elderly: the importance of white matter hyperintensities}, journal = {Journal of the International Neuropsychological Society}, year = {2004}, volume = {10}, number = {3}, pages = {371}, abstract = {We sought to identify magnetic resonance (MR)-imaged structures associated with declarative memory in a community-dwelling sample of elderly Mexican-American individuals with a spectrum of cognitive decline. Measured structures were the hemispheric volumes of the hippocampus (HC), parahippocampal gyrus, and remaining temporal lobes, as well as severity of white matter signal hyperintensities (WMH). Participants were an imaged subsample from the Sacramento Area Latino Study of Aging (SALSA), N = 122. Individuals were categorized as normal, memory impaired (MI), cognitively impaired non-demented (CIND), or demented. We show that WMH was the strongest structural predictor for performance on a delayed free-recall task (episodic memory) in the entire sample. The association of WMH with delayed recall was most prominent in elderly normals and mildly cognitively impaired individuals with no dementia or impairment of daily function. However, the left HC was associated with verbal delayed recall only in people with dementia. The right HC volume predicted nonverbal semantic-memory performance.
We conclude that WMH are an important pathological substrate that affects certain memory functions in normal individuals and those with mild memory loss and discuss how tasks associated with WMH may rely upon frontal lobe function.}, file_url = {/fileadmin/user_upload/files/publications/pdf3202.pdf}, state = {published}, author = {Petkov CI{chrisp}; Wu CC; Eberling JL; Mungas D; Zrelak PA; Yonelinas AP; Haan MN; Jagust WJ} } @Article{ 3201, title = {Illusory sound perception in macaque monkeys}, journal = {Journal of Neuroscience}, year = {2003}, month = {10}, volume = {23}, number = {27}, pages = {9155}, abstract = {In most natural listening environments, noise occludes objects of interest, and it would be beneficial for an organism to correctly identify those objects. When a sound of interest ("foreground" sound) is interrupted by a loud noise, subjects perceive the entire sound, even if the noise was intense enough to completely mask a part of it. This phenomenon can be exploited to create an illusion: when a silent gap is introduced into the foreground and high-intensity noise is superimposed into the gap, subjects report the foreground as continuing through the noise although that portion of the foreground was deleted. This phenomenon, referred to as auditory induction or amodal completion, is conceptually similar to visual induction, fill-in, illusory motion, and illusory contours. Two rhesus macaque monkeys performed a task designed to assess auditory induction. They were trained to discriminate complete stimuli from those containing a silent gap in the presence of two types of noise. Interrupting noise temporally coincided only with the gap, and in humans this causes induction. Surrounding noise temporally encompassed the entire foreground, and in humans this causes masking without auditory induction. Consistent with previous human psychophysical results, macaques showed better performance with surrounding masking noise than interrupting noise designed to elicit induction. These and other control experiments provide evidence that primates may share a general mechanism to perceptually complete missing sounds.}, file_url = {/fileadmin/user_upload/files/publications/pdf3201.pdf}, web_url = {http://www.jneurosci.org/content/23/27/9155.long}, state = {published}, author = {Petkov CI{chrisp}; O'Connor KN; Sutter ML} } @Article{ 3204, title = {Brain structure and cognition in a community sample of elderly Latinos}, journal = {Neurology}, year = {2002}, volume = {59}, number = {3}, pages = {383}, file_url = {/fileadmin/user_upload/files/publications/pdf3204.pdf}, state = {published}, author = {Wu CC; Mungas D; Petkov C{chrisp}; Eberling JL; Zrelak PA; Buonocore MH; Brunberg JA; Haan MN; Jagust WJ} } @Article{ 3205, title = {Auditory scene analysis in dyslexics}, journal = {Neuroreport}, year = {2000}, volume = {11}, number = {9}, pages = {1967}, abstract = {It has been argued that dyslexics suffer from temporal sensory processing deficits which affect their ability to discriminate speech in quiet environments. The impact of auditory deficits on non-language aspects of perception, however, is poorly understood. In almost every natural-listening environment, one must constantly construct scenes of the auditory world by grouping and analyzing sounds generated by multiple sources. We investigated whether dyslexics have difficulties grouping sounds.
The results demonstrate that dyslexics have an impairment in grouping auditory objects that depends both on the sounds}, file_url = {/fileadmin/user_upload/files/publications/pdf3205.pdf}, state = {published}, author = {Sutter ML; Petkov CI{chrisp}; Baynes K; O'Connor KN} } @Inbook{ 6030, title = {Multisensory Influences on Auditory Processing: Perspectives from fMRI and Electrophysiology}, year = {2012}, pages = {99-114}, abstract = {In this review, we discuss some of the results of early multisensory influences on auditory processing, and provide evidence that sensory integration occurs in a distributed manner across several processing stages. In particular, we discuss some of the methodological aspects relevant for studies seeking to localize and characterize multisensory influences, and emphasize some of the recent results pertaining to speech and voice integration.}, web_url = {http://www.crcnetbase.com/doi/abs/10.1201/b11092-9}, editor = {Murray, M. M. , M. T. Wallace}, publisher = {CRC Press}, address = {Boca Raton, FL, USA}, series = {Frontiers in Neuroscience}, booktitle = {The neural bases of multisensory processes}, state = {published}, ISBN = {978-1-439-81217-4}, DOI = {10.1201/b11092-9}, author = {Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Petkov C{chrisp}{Department Physiology of Cognitive Processes}; Remedios R{ryan}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Inbook{ 6029, title = {Imaging Cross-Modal Influences in Auditory Cortex}, year = {2010}, volume = {2}, pages = {123-137}, abstract = {Recent studies have made considerable progress in understanding how our brain combines the information from different sensory modalities and much evidence about the cortical regions involved has been provided by functional magnetic resonance imaging. Imaging studies have, for example, shown that cross-modal influences occur already at early stages of auditory cortex. However, given our still limited understanding of the functional organization of human auditory cortex, these results are often difficult to interpret with respect to the exact localization of cross-modal influences. Here we discuss a localization technique, which provides a functional map of individual fields in the auditory cortex of individual subjects. Using high-resolution imaging techniques in an animal model with known organization of auditory cortex, we proved the feasibility of this functional mapping technique and demonstrated its use in localizing cross-modal influences to individual auditory fields. Our results show that cross-modal influences already occur in secondary auditory cortices and increase along the auditory processing hierarchy. While these results provide good evidence that auditory processing can be affected by non-acoustic stimuli very early on, we also discuss the interpretability of these findings with regard to the underlying neuronal activity, which is considerably hampered by the still unknown neural basis of the fMRI signal.}, web_url = {http://link.springer.com/content/pdf/10.1007%2F978-1-4419-5615-6_8.pdf}, editor = {Kaiser, J. , M. J.
Naumer}, publisher = {Springer}, address = {New York, NY, USA}, booktitle = {Multisensory Object Perception in the Primate Brain}, state = {published}, ISBN = {978-1-441-95614-9}, DOI = {10.1007/978-1-4419-5615-6_8}, author = {Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Petkov C{chrisp}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Inbook{ 5827, title = {Cortical processing of vocal sounds in primates}, year = {2009}, month = {10}, pages = {135-147}, abstract = {The recent work on speech and vocal sound processing by the human brain finds itself at a crossroads with the studies in non-human primates on the neurobiological basis of vocal communication. Speech is a recent evolutionary adaptation, so direct animal homologs of the neural systems supporting speech perception are not expected. However, vocal expressions are richly informative for many social animals. Thus, the interest in how the human brain is processing the speaker’s identity and affective aspects of the human voice, including the stimulus-bound aspects of speech, may be approached from an evolutionary perspective. From the other side, comparative biologists have started to close the gap between animal and human data by using the same noninvasive imaging techniques as those applied to the human brain, to study the brains of animals. Relying on the same techniques facilitates cross-species comparisons, and provides links to invasive studies of the brain processes at the neuronal level. In this chapter, we consider how the brains of primates analyze the features in vocal sounds, focusing in particular on the correspondence between the auditory cortex processes in the brains of monkeys, apes and man.}, web_url = {http://www.elsevier.com/wps/find/bookdescription.cws_home/719106/description#toc}, editor = {Brudzynski, S.
M.}, publisher = {Academic Press}, address = {London, UK}, series = {Handbook of Behavioral Neuroscience ; 19}, booktitle = {Handbook of mammalian vocalization: an integrative neuroscience approach}, state = {published}, ISBN = {978-0-12-374593-4}, DOI = {10.1016/B978-0-12-374593-4.00014-0}, author = {Petkov C{chrisp}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis N{nikos}{Department Physiology of Cognitive Processes}} } @Inbook{ 4646, title = {Lost without a map: Pursuing primate homologies with functional imaging}, year = {2008}, pages = {175-194}, web_url = {https://www.novapublishers.com/catalog/product_info.php?cPath=23_131_104&products_id=6193&osCsid=2fd62a5ac914cbc0483f60dc36c7e958}, editor = {Liang, Y.}, publisher = {Nova Science Publ.}, address = {New York, NY, USA}, series = {Nova Biomedical}, booktitle = {Research topics on brain mapping}, state = {published}, ISBN = {978-1-60456-001-5}, author = {Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Poster{ PerrodinKLP2014, title = {The neurobiology of voice processing: What have we learned from neuronal recordings in voice-sensitive cortex?}, year = {2014}, month = {9}, day = {14}, volume = {5}, pages = {99-100}, web_url = {https://www.conftool.com/auditorycortex2014/index.php?page=browseSessions&print=yes&doprint=yes&form_room=2&mode=table&presentations=show}, event_name = {5th International Conference on Auditory Cortex: Towards a Synthesis of Human and Animal Research}, event_place = {Magdeburg, Germany}, state = {published}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Poster{ PerrodinKLP2013, title = {Audio-visual interactions in neurons from voice-sensitive auditory cortex and the superior-temporal sulcus}, year = {2013}, month = {11}, day = {11}, volume = {43}, number = {453.09}, abstract = {During social communication, vocal and facial cues combine to form a coherent audiovisual percept. While electrophysiology studies have described crossmodal interactions at various sensory processing stages, it remains unclear how audiovisual influences occur at the neuronal level in face- or voice-sensitive areas. Here, we characterize visual influences from facial content on neuronal responses to vocalizations from a voice-sensitive region in the anterior supratemporal plane (aSTP) and the anterior superior-temporal sulcus (STS). We hypothesized that the STS, a typical multisensory region, would show greater specificity in visual-auditory interactions, while the aSTP would be mainly involved in auditory analysis, such as distinguishing between voice-identity or call-type features. Using dynamic face and voice stimuli, we recorded individual single neurons from both regions in the right hemisphere of two awake Rhesus macaques. To test the specificity of visual influences to behaviorally relevant stimuli, we included a set of audiovisual control stimuli, in which a voice was paired with a mismatched visual facial context. 
Within the aSTP we found an interesting division of neural sensitivity to vocal features: the sensitivity to call-type or speaker-identity was supported by two functionally distinct neuronal subpopulations within this area. In contrast, neurons in the STS were less sensitive to these vocal features. Multisensory response modulation was observed in both regions, while evoked responses to visual stimuli were more prevalent in the STS. Moreover, visual influences in the STS were modulated by speaker-related features and were reduced during stimulation with incongruent voice-face pairs. In contrast, visual influences in the aSTP showed little specificity for audio-visual congruency. Our results thus show that voice-sensitive cortex specializes in auditory analysis via a division of neuronal sensitivity while congruency-sensitive visual influences emerge to a greater extent in the STS. Together, our results highlight the transformation of audio-visual representations of communication signals across successive levels of the multisensory processing hierarchy in the primate temporal lobe.}, web_url = {http://www.sfn.org/annual-meeting/neuroscience-2013}, event_name = {43rd Annual Meeting of the Society for Neuroscience (Neuroscience 2013)}, event_place = {San Diego, CA, USA}, state = {published}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Poster{ PerrodinKLP2013_2, title = {Visual modulation of neurons in voice‐sensitive auditory cortex and the superior‐temporal sulcus}, year = {2013}, month = {11}, day = {8}, pages = {77}, abstract = {Effective social interactions can depend upon the receiver combining vocal and facial content to form a coherent audiovisual representation of communication signals. Neuroimaging studies have identified face- or voice-sensitive areas in the primate brain, some of which have been proposed as candidate regions for face-voice integration. However, it was unclear how audiovisual influences occur at the neuronal level within such regions and in comparison to classically defined multisensory regions in temporal association cortex. Here, we characterize visual influences from facial content on neuronal responses to vocalizations from a voice-sensitive region in the anterior supratemporal plane (STP) and the anterior superior-temporal sulcus (STS). Using dynamic face and voice stimuli, we recorded individual units from both regions in the right hemisphere of two awake Rhesus macaques. To test the specificity of visual influences to behaviorally relevant stimuli, we included a set of audiovisual control stimuli, in which a voice was paired with a mismatched visual facial context. Within the STP, our results show auditory sensitivity to various vocal features, which was not evident in STS units. We newly identify a functionally distinct neuronal subpopulation in the STP that carries the area’s sensitivity to voice-identity related characteristics. Audio-visual interactions were prominent in both areas, with direct crossmodal convergence being more prevalent in the STS. Moreover, visual influences modulated the responses of STS neurons with greater specificity, such as being more often associated with congruent voice-face stimulus pairings than STP neurons. 
Our results show that voice-sensitive cortex specializes in auditory analysis of vocal features while congruency-sensitive visual influences emerge to a greater extent in the STS. Together, our results highlight the transformation of audio-visual representations of communication signals across successive levels of the multisensory processing hierarchy in the primate temporal lobe.}, web_url = {http://www.med.upenn.edu/apan/assets/user-content/documents/Archived.APAN.2013.pdf}, event_name = {Tucker‐Davis Technologies Symposium on Advances and Perspectives in Auditory Neurophysiology (APAN 2013)}, event_place = {San Diego, CA, USA}, state = {published}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Poster{ PerrodinKLP2012, title = {Visual influences on neurons in voice-sensitive cortex}, year = {2012}, month = {10}, day = {15}, volume = {42}, number = {366.18}, abstract = {The brains of human and nonhuman primates are thought to contain brain regions that have specialized for processing voice and face content. Although voice- and face-sensitive regions have been primarily studied in their respective sensory modalities, recent human functional magnetic resonance imaging (fMRI) studies have suggested that cross-modal interactions occur in these regions. Here, we investigated whether, and how, neuronal spiking activity in a voice region is modulated by visual (face) stimulation. Using fMRI-guided electrophysiology, we targeted neurons in a voice-sensitive region in the right supra-temporal plane of two rhesus macaques. We used dynamic faces and voices for stimulation, including congruent and incongruent audiovisual pairs. Different stimuli by monkey and human callers were organized in a multifactorial design, to analyze the impact of the following factors on neuronal audiovisual influences: caller species, familiarity, and identity, and call type. Within this voice-sensitive region, we obtained recordings from 149 auditory responsive units, 45% of which demonstrated visual influences. The majority of the visual modulation was characterized by audiovisual responses that significantly deviated from the sum of the responses to both unimodal stimuli (i.e., non-additive multisensory influences). Contrasting monkey ‘coo’ calls with human-mimicked ‘coos’ revealed qualitatively similar, but quantitatively different audiovisual processing of conspecific relative to heterospecific voices; human calls elicited more sub-additive interactions than monkey calls. The call type and speaker identity factors interacted and significantly impacted upon both the direction and amplitude of the visual influences. Finally, familiar voices consistently elicited stronger audiovisual influences than unfamiliar voices, despite auditory responses being similar. Lastly, we compared the specificity of audiovisual interactions and the reliability of neuronal responses across congruent and incongruent audiovisual pairs. In some cases, we found neurons to be differentially affected by voice-face congruency, e.g., neurons were most sensitive to violating the congruency of a conspecific voice/face pairing as caused by substituting the monkey face with a human face. 
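The non-additive interaction criterion described above compares each unit's audiovisual response against the sum of its two unimodal responses. A minimal sketch of such a test, assuming per-trial spike counts for auditory-only, visual-only, and audiovisual conditions (the toy data and variable names are illustrative, not from the study):

    # Sketch: flag a unit as non-additive if its audiovisual response deviates
    # significantly from the sum of the two unimodal mean responses.
    import numpy as np
    from scipy import stats

    rng = np.random.default_rng(0)
    aud = rng.poisson(12, size=30)   # spike counts, auditory-only trials (toy)
    vis = rng.poisson(3, size=30)    # visual-only trials (toy)
    av = rng.poisson(11, size=30)    # audiovisual trials (toy)

    predicted_sum = aud.mean() + vis.mean()
    t, p = stats.ttest_1samp(av, predicted_sum)
    if p < 0.05:
        kind = "sub-additive" if av.mean() < predicted_sum else "super-additive"
        print(f"non-additive ({kind}): AV={av.mean():.1f} vs A+V={predicted_sum:.1f}")
    else:
        print("no significant deviation from additivity")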
In conclusion, our study links to human fMRI studies on cross-sensory influences in voice/face regions, and the results describe the nature of the visual influences on neuronal responses in a voice-sensitive region in the primate brain. The results also help to characterize how stimulus features shape the cross-modal effects in this region.}, web_url = {http://www.abstractsonline.com/Plan/ViewAbstract.aspx?sKey=b012820a-b1ec-488f-8d44-68830bc3b213&cKey=4104b857-4524-4a2f-8a26-37f1c3fc0d7a&mKey=70007181-01c9-4de9-a0a2-eebfa14cd9f1}, event_name = {42nd Annual Meeting of the Society for Neuroscience (Neuroscience 2012)}, event_place = {New Orleans, LA, USA}, state = {published}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Poster{ PerrodinKLP2012_4, title = {Visual influences on neurons in voice-sensitive cortex}, year = {2012}, month = {10}, day = {12}, pages = {64}, abstract = {The brains of human and nonhuman primates are thought to contain brain regions that have specialized for processing voices and faces. Although voice- and face-sensitive regions have been primarily studied in their respective sensory modalities, recent human functional magnetic resonance imaging (fMRI) studies have suggested that cross-modal interactions occur in these regions. Here, we investigated whether, and how, neuronal activity in a voice region is modulated by visual (face) stimulation. Using fMRI-guided electrophysiology, we targeted neurons in a voice-sensitive region in the right supra-temporal plane of two rhesus macaques. We used dynamic faces and voices of different human and monkey individuals for stimulation, including congruent and incongruent audiovisual pairs. We observed robust non-additive visual influences of facial information on the auditory responses of neurons in this voice-sensitive region. In accordance with previous studies, the direction of the audiovisual interactions seemed primarily determined by the phase of visually evoked theta oscillations at auditory stimulus onset. Yet, we found that, in addition, speaker-related stimulus features such as caller familiarity and identity and call type, studied within a multifactorial experimental design, differentially modulated the crossmodal effects. In particular, familiar voices consistently elicited larger audiovisual influences than unfamiliar voices, despite auditory responses being similar. Finally, we found neurons to be differentially sensitive to stimulus congruency: the specificity of audiovisual influences was disrupted when violating the congruency of a conspecific voice/face pairing by substituting the monkey face with a human face. In conclusion, our results describe the nature of the visual influences on neuronal responses in a voice-sensitive region in the primate brain.
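The phase dependence noted above (visually evoked theta oscillations at auditory stimulus onset) is commonly quantified by band-pass filtering the LFP and reading out the instantaneous Hilbert phase at sound onset; a sketch under assumed parameters (1 kHz sampling, a 4-8 Hz theta band, and a synthetic trace; this is an illustration, not the study's pipeline):

    # Sketch: theta-band LFP phase at auditory stimulus onset (synthetic data).
    import numpy as np
    from scipy.signal import butter, filtfilt, hilbert

    fs = 1000.0                                   # assumed sampling rate (Hz)
    t = np.arange(0, 2.0, 1 / fs)
    lfp = np.sin(2 * np.pi * 6 * t) + 0.5 * np.random.randn(t.size)  # toy LFP

    b, a = butter(3, [4 / (fs / 2), 8 / (fs / 2)], btype="band")     # 4-8 Hz
    theta = filtfilt(b, a, lfp)                   # zero-phase band-pass filter
    phase = np.angle(hilbert(theta))              # instantaneous phase (rad)

    onset_idx = int(1.0 * fs)                     # auditory onset at t = 1 s
    print(f"theta phase at sound onset: {phase[onset_idx]:+.2f} rad")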
This study links to human fMRI studies on multisensory influences in voice/face regions, provides insights into the neuronal cross-modal effects in these regions, and hypothesizes that neurons in face-sensitive regions might show comparable multisensory influences from the auditory domain.}, web_url = {http://www.apan.jhu.edu/APAN2012_Abstracts.pdf}, event_name = {Tucker-Davis Symposium on Advances and Perspectives in Auditory Neurophysiology (APAN 2012)}, event_place = {New Orleans, LA, USA}, state = {published}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Poster{ PerrodinKLP2012_3, title = {Visual influences on neurons in voice-preferring cortex}, year = {2012}, month = {9}, day = {2}, volume = {4}, pages = {153}, abstract = {Many animals use face and voice information during communication, but it remains unclear how the brain integrates such cross-sensory input. Functional imaging evidence suggests that the brains of human and nonhuman primates contain voice- and face-preferring regions, and some human studies have reported that multisensory interactions occur in these regions. Yet, to date neurons in monkey voice/face regions have been studied exclusively with unisensory stimuli, or electrophysiological studies of voice/face interactions have focused on other parts of the brain. We investigated whether and how neurons in a monkey voice-preferring cluster would be modulated by multisensory (face/voice) influences. Using fMRI-guided electrophysiology, we targeted neurons in a voice-preferring fMRI cluster in the right hemisphere on the supra-temporal plane of two rhesus macaques. We used dynamic vocalizing faces and voices for stimulation, presented in auditory, visual and audiovisual conditions, including congruent and incongruent audiovisual pairs. The multifactorial experimental design also included stimuli from different familiar and unfamiliar monkeys and humans, and allowed us to analyze the impact of the following factors on the audiovisual modulation of neuronal responses: 'caller's species', 'caller's familiarity to the subject', 'caller's identity', and 'call type'. We obtained recordings from 149 auditory responsive units, 45% of which demonstrated visual influences. The vast majority of the visual modulation was characterized by audiovisual responses that significantly deviated from the sum of the responses to both unimodal stimuli (i.e., non-additive multisensory influences). Next we used ANOVA within our multifactorial design to analyze the impact of the different stimulus features on the population of responsive units. First, we found that human vocalizations (where humans imitated monkey 'coo' vocalizations) elicited similar visual modulation as monkey 'coo' calls, but were more likely to elicit sub-additive interactions than the monkey calls. This result suggests qualitatively similar but quantitatively different audiovisual processing of conspecific relative to heterospecific voices and faces, at least for heterospecific stimuli that involved humans imitating monkey coos.
Second, while auditory responses were comparable across different speakers (but different for coo vs grunt calls), both the call type and speaker identity factors turned out to be significant when visual influences were considered. Finally, despite familiar and unfamiliar callers eliciting similar auditory responses, the caller familiarity factor had a significant effect on the visual modulation, with familiar voices consistently eliciting stronger audiovisual interactions than unfamiliar voices. These results suggest that some stimulus features differentially modulate the direction and/or magnitude of visual influences on neuronal auditory responses. We also compared neural responses to congruent vs. incongruent audiovisual pairs. We found that under a number of conditions the congruency/incongruency of the stimuli generally did not affect neuronal responses, except for one stimulus pairing involving a monkey voice/face combination where the original voice was replaced with a human voice. In this case, the majority of units selectively integrated the congruent, but not the incongruent, stimulus, and the reliability of the neuronal response was significantly decreased (as measured by a drop in the Fano factor in audiovisual responses to the incongruent pair). In summary, our results identify a considerable level of visual (face) influences on the auditory processing by neurons in an fMRI identified voice-preferring region in the primate brain. The results also showed stimulus-related specificity of the visual influences that provide insights on the type of multisensory influences in this region, which we discuss in relation to those that have been reported for other parts of the brain. These results extend our understanding of the multisensory influences evident at the neuronal level in primate voice-sensitive clusters, link to the fMRI studies in humans and hypothesize that face-sensitive regions would also show strong cross-sensory influences.}, web_url = {http://wp.unil.ch/auditorycortex2012/}, event_name = {4th International Conference on Auditory Cortex}, event_place = {Lausanne, Switzerland}, state = {published}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Poster{ PerrodinKLP2012_2, title = {Visual Influences On Neurons In Voice-Sensitive Cortex}, year = {2012}, month = {7}, volume = {8}, number = {p038.23}, abstract = {Many animals use cross-sensory information during communication, but it remains unclear how the brain integrates face and voice information. Functional imaging evidence suggests that the brains of human and nonhuman primates contain voice- and face-sensitive regions, and some of the human studies have suggested that multisensory interactions occur in these regions. Yet, to date neurons in monkey voice/face regions have been studied exclusively with unisensory stimuli. We targeted neurons in a recently identified voice-sensitive cluster in the right hemisphere on the supratemporal plane to investigate how neurons in the monkey brain combine auditory voice and visual face information. Extracellular recordings were conducted in two Rhesus macaques participating in a visual fixation task. 
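For reference, the Fano factor used above as a response-reliability measure is the trial-to-trial variance of the spike count divided by its mean; a self-contained sketch with hypothetical counts (not data from the study):

    # Sketch: Fano factor (spike-count variance / mean) per stimulus condition.
    import numpy as np

    def fano(counts):
        counts = np.asarray(counts, dtype=float)
        return counts.var(ddof=1) / counts.mean()

    congruent = [9, 11, 10, 12, 8, 10, 11, 9]      # hypothetical spike counts
    incongruent = [4, 15, 7, 13, 2, 16, 6, 12]     # hypothetical spike counts

    print(f"Fano, congruent pair:   {fano(congruent):.2f}")
    print(f"Fano, incongruent pair: {fano(incongruent):.2f}")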
Dynamic face and voice stimuli (movies of vocalizing monkeys and humans imitating monkey “coo” calls) were presented in auditory only, visual only and audio-visual stimulation conditions, including congruent and incongruent audio-visual pairs. In this region, we identified spiking activity driven by the presence of auditory stimuli (n = 130 single- and multi-units), 42% of which demonstrated visual influences. Most of the visual modulation (36% of responsive units) consisted of non-additive multisensory effects, where the audiovisual responses significantly deviated from the sum of both unimodal responses. The magnitude of the visual influences was differentially sensitive to stimulus features such as call type, speaker identity and familiarity. Human voices elicited auditory and audiovisual responses qualitatively similar to those elicited by monkey voices. Finally, we found that incongruent stimuli elicited a larger proportion of sublinear audiovisual interactions, relative to congruent audiovisual pairs. Our results identify visual influences at the neuronal level in a primate auditory 'voice' region. Together with results from functional imaging studies in humans, these findings extend our understanding of the multisensory influences at voice regions, which might also be evident in neurons at face-sensitive regions.}, web_url = {http://fens.ekonnect.co/FENS_331/poster_32304/program.aspx}, event_name = {8th Forum of European Neuroscience (FENS 2012)}, event_place = {Barcelona, Spain}, state = {published}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Poster{ PerrodinKLP2011_3, title = {Electrophysiological characterization of an fMRI-identified voice-preferring region}, year = {2011}, month = {3}, pages = {818}, abstract = {A region of ‘voice’ clusters has recently been identified in the macaque auditory cortex with functional magnetic resonance imaging (fMRI). These clusters show a strong fMRI activity preference for the voice of conspecific individuals and appear to functionally correspond to those from the known human voice region. In the visual system fMRI has been used to guide electrophysiological recordings from neurons in the monkey brain that were shown to be extremely selective for faces, so-called ‘face’ cells. We investigated whether fMRI-guided electrophysiology would reveal comparable levels of selectivity in one of the monkey voice clusters, and how the functional properties of those ‘voice’ cells compare to those of their visual counterparts. During fMRI acquisition and electrophysiological recordings, three categories of 12 sounds were used for stimulation: macaque vocalizations (MVocs), other animal vocalizations (AVocs), and natural sounds (NSnds). The sound categories were comparable in their low-level acoustical features, having been selected for this from a large set of sounds. We first used the stimuli during fMRI, as we have previously done, to identify clusters with a strong activity preference for MVocs. Then electrophysiological responses to the auditory stimuli were recorded from the anterior voice cluster in two awake macaques (total of 186 responsive single- and multi-units).
A significant proportion of the neurons (45%; χ2-test: p = 0.0013) responded better to MVocs than to either of the other two complex natural sound categories. The area’s preference for MVocs was also present in the population spiking and local-field potential response, consistent with the fMRI results. Adapting the frequently employed criterion used to define ‘face’ cells as responding at least twice as strongly to faces as to other objects, 25% of the neurons recorded could be classified as ‘voice’ cells. Finally, we evaluated the response selectivity to individual stimuli within the MVocs, and found that units in the voice area responded to an average of 27% of the MVocs stimuli. Our results suggest that a strong fMRI activity preference need not result from a large proportion of highly selective neurons, and describe a population of neurons with a preference for voices over other complex natural sounds. The proportion of identified ‘voice’ cells is comparable to what the majority of studies on ‘face’ cells report. However, ‘voice’ cells seem to be more selective for individual voices than ‘face’ cells, which have been shown to respond to ~62% of the face stimuli. This divergence in functional properties between ‘voice’ and ‘face’ cells may reflect evolutionary differences that have affected voice- and face-specialization in primate brains. Namely, the visual system appears to have specialized during vertebrate evolution to represent canonical facial features (e.g., two eyes, a nose and a mouth). By contrast, the auditory system could have had less opportunity to specialize for canonical auditory features, given that many animals modify the acoustics of their vocalizations to be distinct from those of other animals and to circumvent environmental noise.}, web_url = {https://www.nwg-goettingen.de/2011/default.asp?scientific_program}, event_name = {9th Göttingen Meeting of the German Neuroscience Society, 33rd Göttingen Neurobiology Conference}, event_place = {Göttingen, Germany}, state = {published}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Poster{ PerrodinKLP2010_2, title = {A brain region consisting of neurons with moderate sensitivity for voices}, year = {2010}, month = {11}, day = {12}, web_url = {http://www.apan.jhu.edu/APAN2010_Program.pdf}, event_name = {Tucker-Davis Symposium on Advances and Perspectives in Auditory Neurophysiology (APAN 2010)}, event_place = {San Diego, CA, USA}, state = {published}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Poster{ PerrodinKLP2010, title = {Visual influences on voice-sensitive neurons}, year = {2010}, month = {11}, day = {12}, web_url = {http://www.apan.jhu.edu/APAN2010_Program.pdf}, event_name = {Tucker-Davis Symposium on Advances and Perspectives in Auditory Neurophysiology (APAN 2010)}, event_place = {San Diego, CA, USA}, state = {published}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive
Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Poster{ 6048, title = {Encoding properties of neurons sensitive to species-specific vocalizations in the anterior temporal lobe of primates}, year = {2009}, month = {8}, volume = {3}, pages = {123-124}, abstract = {Human and monkey neuroimaging and monkey electrophysiological studies suggest that neurons in the anterior superior-temporal lobe are selective for species-specific vocalizations. To better understand the basis of this selectivity, we studied the coding properties of these neurons using extracellular recordings in the awake macaque. We used a paradigm based on a previous macaque fMRI study to localize with electrophysiological recordings a voice-sensitive region in the anterior superior-temporal plane that prefers species-specific vocalizations over other complex sound categories. This revealed a cluster of vocalization-preferring sites about 5 mm anterior to the tonotopically organized field RT. To evaluate the neurons’ sensitivity to different vocal components, we used a set of 12 species-specific vocalizations and several acoustical manipulations of these calls. These controls involved: 1) preserved spectrum (PS) versions of the calls, 2) preserved envelope (PE) versions, i.e., pink noise shaped with the Hilbert-extracted call envelope, and 3) preserved spectrum and envelope (PSE) versions, which combine the first-order spectral and temporal characteristics of the calls, i.e., their extracted frequency spectrum shaped with their envelope (see Figure 1). Comparing the responses to original calls and the controls, only 29% of the units significantly preferred one of these four categories, suggesting that the responses of many neurons are robust to our spectro-temporal manipulations. Of the selective units, the majority (60%) favored the preserved spectrum sounds (PS; Fig. 1), indicating that these neurons are more sensitive to spectral than temporal components. Yet, a linear response classifier, inferring the identity of a vocalization from a neuron’s single-trial responses, better decoded the original calls than the controls in the population of neurons. In addition, we found that the neurons are more selective for and more sparsely encode the original calls than the acoustical controls. Notably, in comparison to previous reports from the auditory core, belt, parabelt and insular regions, the neurons in the anterior superior-temporal plane were considerably more selective for individual vocalizations (Fig. 1D). We then tested whether these neurons encode acoustical (phonetic) properties of calls or their presumed functional meaning (semantics). More units discriminated between acoustically different sounds belonging to a similar semantic category (e.g., coo vs. grunt) than those that were acoustically similar and from different semantic categories (e.g., grunt vs. pant threat). These results suggest that neuronal responses at this stage of the auditory processing hierarchy are governed by the acoustics of the calls. In conclusion, evaluating single neuron responses to the features of species-specific vocalizations helps clarify the function of the voice-sensitive regions of the primate brain.
Although many of the neurons did not prefer any specific call type, they were selective for and could well decode the species-specific vocalizations and their responses revealed some preference for the spectral features of the calls. Our findings suggest that these neurons encode the acoustical features of species-specific vocalization, such as the spectrum of formant frequencies, which can provide caller species and identity information perhaps independently of a vocalization’s content.}, web_url = {http://www.auditory-cortex.de/assets/pdf/AC2009_Program_End.pdf}, event_name = {3rd International Conference on Auditory Cortex: Current Concepts in Human and Animal Research (AC 2009)}, event_place = {Magdeburg, Germany}, state = {published}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Veit L{lveit}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov C{chrisp}{Department Physiology of Cognitive Processes}} } @Poster{ 6049, title = {Visual influences on voice-selective neurons in the anterior superior-temporal plane}, year = {2009}, month = {8}, volume = {3}, pages = {125}, abstract = {For social interaction and survival primates rely heavily on vocal and facial communication signals from their conspecifics. To date many studies have evaluated the unisensory representations of either vocal or facial information in regions thought to be voice or face selective. Other studies have directly evaluated the multisensory interactions of voices and faces but have focused on posterior auditory regions closer to the primary auditory cortex. This work investigates multisensory interactions at the neuronal level in an auditory region in the anterior superior temporal plane, which contains one of the regions important for processing voice-related information. Extracellular recordings were obtained from the auditory cortex of macaque monkeys, targeting an anterior voice region that we have previously described with functional magnetic resonance imaging (fMRI, Fig. 1A). For stimulation we used movies of vocalizing monkeys and humans which we matched in their low-level auditory and visual features. These dynamic face and voice stimuli allowed us to evaluate how neurons responded to auditory, visual or audio-visual components of the stimuli. Our experiments also contained control conditions consisting of several mismatched audiovisual stimuli combinations, such as 1) a voice matched to a face from a different species, 2) adding a temporal delay in the visual component of the stimulus, or 3) using an acoustically manipulated voice with the original facial stimulus. Our neuronal recordings identified a clustered population of voice-selective sites in the anterior superior temporal plane, ~5 mm anterior to the tonotopically organized field RT (Fig. 1B). A significant visual influence of the dynamic faces on the corresponding (matched) vocalizations was observed in both the local-field potential (LFP) and the spiking activity (analog multiunit activity, AMUA): 38% of the sites showed audiovisual interactions in the LFP signals, and 60% in the AMUA (Fig. 2). In addition, the multisensory influence was significantly stronger for the matching voice and face stimuli than to any of the incongruent (mismatched) control conditions, confirming the specificity of the cross-sensory influence on the neuronal activity. 
Our results provide evidence for visual influences in what has been characterized as an auditory ‘voice’ area. This visual modulation was specific for behaviorally relevant voice-face associations and demonstrates that the processing of voice-related information in higher auditory regions is influenced by multisensory input.}, web_url = {http://www.auditory-cortex.de/assets/pdf/AC2009_Program_End.pdf}, event_name = {3rd International Conference on Auditory Cortex: Current Concepts in Human and Animal Research (AC 2009)}, event_place = {Magdeburg, Germany}, state = {published}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov C{chrisp}{Department Physiology of Cognitive Processes}} } @Poster{ 6107, title = {Evaluating auditory network connectivity with combined microstimulation and functional imaging in the monkey}, year = {2009}, month = {2}, number = {1111}, abstract = {A high-level auditory-cortical region was recently identified with functional magnetic resonance imaging (fMRI) in rhesus monkeys. This brain region shows a close functional correspondence to the so-called human-voice region. Both human and monkey “voice” regions lie anterior and superior on the temporal lobe and appear to be exquisitely sensitive to certain vocal components in species-specific vocalizations that help to identify other conspecific members of the species. To clarify the in-vivo functional connectivity of the rhesus monkey voice region along with its putative auditory cortical network, we used microstimulation in combination with high-resolution fMRI. First, we functionally localized the voice region with blood-oxygen-level-dependent (BOLD) fMRI, as previously described. Then we microstimulated this region with glass-coated iridium microelectrodes, using biphasic, cathode-leading, 250 to 500 μA pulses of 200 μs duration. We used the fMRI BOLD response to evaluate the anterograde targets of the microstimulation site. Microstimulation of the voice region, which lies on the rostral superior-temporal plane (rSTP), elicited a BOLD response from hierarchically earlier auditory areas (feed-back), and the surrounding superior temporal plane (STP), gyrus (STG) and sulcus (STS) of the ipsilateral hemisphere. We next microstimulated an upper-bank STS region that was the target of the voice region. The STS microstimulation seemed to show more robust medial and orbital prefrontal cortex activity in comparison to microstimulation of the voice region on the STP.
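To make the stimulation waveform concrete: a biphasic, cathode-leading pulse consists of a negative (cathodic) phase followed by a charge-balancing positive phase of equal duration. A sketch constructing one such pulse (the 300 μA amplitude is one point within the reported 250-500 μA range; the sampling rate is an assumption for illustration):

    # Sketch: one biphasic, cathode-leading microstimulation pulse,
    # 200 us per phase, 300 uA (within the reported 250-500 uA range).
    import numpy as np

    fs = 1e6                                   # assumed 1 MHz waveform sampling
    phase_samples = int(200e-6 * fs)           # 200 us per phase
    amp_ua = 300.0

    pulse = np.concatenate([
        -amp_ua * np.ones(phase_samples),      # cathodic (negative) phase first
        +amp_ua * np.ones(phase_samples),      # charge-balancing anodic phase
    ])
    print(f"duration: {pulse.size / fs * 1e6:.0f} us, "
          f"net charge: {pulse.sum() / fs:.1e} uA*s")   # ~0: charge-balanced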
We are currently comparing these results to those obtained from microstimulating the earlier stages of the auditory cortical pathway and aim to compare our functional connectivity results to anatomical tractography from the analysis of retrograde and anterograde tracers placed in some of the microstimulated regions.}, web_url = {http://www.aro.org/archives/2009/2009_1111_e15f0394.html}, event_name = {32nd Annual Midwinter Meeting of the Association for Research in Otolaryngology (ARO 2009)}, event_place = {Baltimore, MD, USA}, state = {published}, author = {Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Kikuchi Y; Augath M{mark}{Department Physiology of Cognitive Processes}; Mishkin M; Rauschecker J; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Poster{ PetkovKGPL2008, title = {Functional imaging of sensitivity to components of the voice in monkey auditory cortex}, year = {2008}, month = {11}, volume = {38}, number = {851.19}, abstract = {A voice region has recently been identified in the monkey auditory cortex which prefers species-specific vocalizations and is sensitive to the vocal differences among monkey callers. To better understand the sensitivity of this and other brain regions for species-specific vocalizations, we independently manipulated two components in monkey vocal sounds. After recording the ‘coo’ vocalizations from 4 rhesus monkeys, we either shifted the position of the fundamental frequency (which is established by the vocal source in the larynx) or shaped the dispersion of the higher, formant frequencies (established by the acoustical filtering that occurs in the vocal tract above the larynx). The manipulations left the sounds within the species-typical range. We then used high-resolution functional imaging (fMRI) to evaluate the sensitivity of the rhesus monkey brain to independent changes of the two vocal components. Initial results revealed that many regions along the auditory-cortical processing hierarchy were sensitive to changes in both the fundamental and formant frequencies. In the earlier stages of cortical processing, especially in the caudal auditory regions, the sensitivity of the auditory fields to changes in the fundamental and formant frequencies depended upon each field’s topography of preferred sound frequency (tonotopy), which we mapped separately. For example, the sensitivity of the auditory field A1 to manipulation of the fundamental frequency occurred over this field’s low-frequency region, while the sensitivity to manipulation of the higher formant frequencies occurred over its high-frequency preferring region. The sensitivity of the anterior cortical regions to the components in the voice showed no such dependency upon their tonotopy. Notably, the monkey voice region, near to the temporal pole, showed better sensitivity to changes in the formant frequencies than to the fundamental. 
The results reveal a putative cortical network for vocal-sound processing and suggest that the voice region extracts information about speaker identity from cues present in the formant frequencies.}, web_url = {http://www.sfn.org/annual-meeting/past-and-future-annual-meetings}, event_name = {38th Annual Meeting of the Society for Neuroscience (Neuroscience 2008)}, event_place = {Washington, DC, USA}, state = {published}, author = {Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Ghazanfar AA{asifg}{Department Physiology of Cognitive Processes}; Patterson RD; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Poster{ 6109, title = {Voice Region Connectivity in the Monkey Assessed with Microstimulation and Functional Imaging}, year = {2008}, month = {11}, volume = {38}, number = {850.2}, abstract = {A “voice” region has recently been identified in the monkey auditory cortex with functional magnetic resonance imaging (fMRI) and electrophysiology, which shows a close functional correspondence to the known human-voice region. Both human and monkey voice regions lie anterior and superior on the temporal lobe and strongly prefer species-specific vocalizations over other categories of sounds and acoustical controls. The human and monkey voice regions are also sensitive to vocal differences among individuals and appear to be important centers for vocal sound processing within a network that is poorly understood. To clarify the in-vivo functional connectivity of the voice region in the rhesus monkey we used microstimulation in combination with high-resolution fMRI. First we functionally localized the voice region with blood-oxygen-level-dependent (BOLD) fMRI, as previously described. Then we microstimulated the voice region with glass-coated iridium microelectrodes, using biphasic, cathode leading, 250 to 500 μA pulses of 200 μs duration. We used the BOLD response to evaluate the anterograde targets of the microstimulation site. Microstimulation of the monkey voice region, which lies on the rostral superior-temporal plane (rSTP), elicited a BOLD response from hierarchically earlier auditory areas (feed-back), and the surrounding superior-temporal-plane (STP), -gyrus (STG) and -sulcus (STS) of the ipsilateral hemisphere. We observed no direct targets in the prefrontal cortex from voice region microstimulation, so we hypothesized that voice information might reach the frontal cortex indirectly. To test this idea we microstimulated a region in the upper bank of the STS that was one of the direct targets of the voice region, which resulted in medial and orbital prefrontal cortex activity, and neighboring regions on the STP, STG, STS and temporal pole. Our initial observations suggest that acoustical information from the voice region reaches the frontal cortex indirectly via other rostro-temporal regions such as the STS. 
Since the primate STS receives multisensory input and is known to contain face-recognition regions, we propose that voice information is paired with face information in the anterior temporal lobe before being transmitted to the prefrontal cortex.}, web_url = {http://www.sfn.org/annual-meeting/past-and-future-annual-meetings}, event_name = {38th Annual Meeting of the Society for Neuroscience (Neuroscience 2008)}, event_place = {Washington, DC, USA}, state = {published}, author = {Kikuchi Y; Rauschecker JP; Mishkin M; Augath M{mark}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Poster{ PerrodinKLP2008, title = {Multisensory integration of dynamic voices and faces in the monkey brain}, year = {2008}, month = {10}, volume = {9}, number = {8}, abstract = {Primates are social animals whose communication is based on their conspecifics' vocalizations and facial expressions. Although much work to date has studied the unimodal representation of vocal or facial information, little is known about the way the nervous system supports the processing of communication signals from different sensory modalities to combine them into a coherent audiovisual percept. It is thought that the brains of human and nonhuman primates evaluate vocal expressions and facial information separately in specialized 'voice' and 'face' brain regions, but we wondered if cross-sensory interactions were already evident at the neuronal level in these typically unimodal brain regions. Using movies of vocalizing humans and monkeys as stimuli, we recorded extracellularly from the auditory cortex of a macaque monkey, targeting his 'voice' region in the right hemisphere. Within a multi-factorial design we evaluated how these auditory neurons responded to different sensory modalities (auditory or visual) or combinations of modalities (audiovisual). We also analyzed the responses for species-specific effects (human/monkey speaker), call type specificity (coo/grunt), as well as speaker familiarity, size and identity. Following the approach in the original fMRI study localizing the monkey voice region, our recordings identified a voice area 'cluster' in this animal. Within this auditory cluster of sites, we observed a significant visual influence on both the local-field potential (LFP) and the spiking activity (AMUA), and found that 30% of the sites showed audiovisual interactions in the LFP signals, and 38% in the AMUA. Grunts were especially effective stimuli for this region, and rather than a specialization for monkey vocalizations, human vocalizations also elicited strong responses. Our results provide evidence for visual influences in what has been characterized as an auditory 'voice' area, suggesting that at least the 'voice' regions are influenced by the visual modality.
Voices and faces seem to interact already in traditionally unisensory brain areas, rather than cross-sensory information being combined only in higher-level, associative or multisensory regions of the brain.}, event_name = {9th Conference of the Junior Neuroscientists of Tübingen (NeNa 2008)}, event_place = {Ellwangen, Germany}, state = {published}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Poster{ 5887, title = {How visual context influences the acoustical processing in and around auditory cortex}, year = {2008}, month = {7}, volume = {6}, number = {020.10}, abstract = {Recent results from human imaging and electrophysiological studies promote the view that processing within auditory cortex can be influenced by cross-modal stimulation of other sensory modalities. Here we scrutinize the neuronal basis of these sensory interactions by probing regions in the monkey auditory pathway for multisensory influences using combinations of functional imaging (fMRI) and electrophysiology. Using functional imaging, we previously found that caudal fields of the auditory cortex show enhanced fMRI-BOLD responses when auditory stimuli were complemented by simultaneous visual or touch stimulation [see Kayser et al. Neuron 48, 2005 and J. Neurosci. 27(8), 2007]. This sensory interaction was much enhanced in the superior temporal regions but was less evident in anterior auditory fields and the insula. To validate these results at the level of individual neurons, we now record field potentials and single unit activity from these regions. We find that within caudal auditory cortex, only 12% of the neurons show cross-modal influences, such as response enhancement or suppression. This visual modulation occurs only for a narrow time window of stimulus onset asynchronies and is independent of the particular kind of stimulus used. In the acoustically responsive region of the insula a similar proportion of neurons show this kind of audio-visual interaction, while in the superior temporal region visual, auditory and multisensory neurons are spatially intermingled and occur in equal proportions. Our findings reveal how the presence of visual input increases along the auditory processing stream and demonstrate that even early auditory cortices are susceptible to cross-modal influences. As a consequence, we conclude that the processing at these stages not only reflects acoustical stimuli but is also dependent on their visual context.}, web_url = {http://fens2008.neurosciences.asso.fr/}, event_name = {6th Forum of European Neuroscience (FENS 2008)}, event_place = {Geneva, Switzerland}, state = {published}, author = {Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Petkov C{chrisp}{Department Physiology of Cognitive Processes}; Remedios R{ryan}{Research Group Physiology of Sensory Integration}; Dahl CD{dahl}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Poster{ 5130, title = {Whose voice is that?
In pursuit of an animal model of vocal recognition}, year = {2008}, month = {4}, web_url = {http://www.ccg.unam.mx/en/news/days_molecular_medicine_2008}, event_name = {Days of Molecular Medicine 2008}, event_place = {Stockholm, Sweden}, state = {published}, author = {Petkov C{chrisp}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Patterson R; Ghazanfar AA{asifg}{Department Physiology of Cognitive Processes}; Logothetis N{nikos}{Department Physiology of Cognitive Processes}} } @Poster{ 5001, title = {Multisensory interactions in auditory cortex}, year = {2007}, month = {11}, volume = {37}, number = {620.15}, abstract = {An increasing body of literature provides compelling evidence that sensory convergence not only occurs in higher association areas, but also in lower sensory regions and even in primary sensory cortices. To scrutinize these early cross-modal interactions, we use the macaque auditory cortex as a model and employ combinations of high-resolution functional imaging (fMRI) and electrophysiological recordings. Using functional imaging in alert and anaesthetized animals, we reported that (only) caudal auditory fields are susceptible to cross-modal modulation: the fMRI-BOLD response in these regions was enhanced when auditory stimuli were complemented by simultaneous visual or touch stimulation [see Kayser et al. Neuron 48, 2005 and J. Neurosci. 27(8), 2007]. To investigate the neuronal basis of this cross-modal enhancement, we recorded the activity of local field potentials and single units in alert animals watching complex audio-visual scenes. Our results show the following: visual stimuli by themselves, on average, do not drive auditory neurons, but cause responses in low-frequency LFPs. Combining visual and auditory stimuli leads to enhanced responses in the low-frequency LFP, but to a reduction of firing rates. This audio-visual interaction was significant at the population level, and for about 10% of the neurons when tested individually. The interaction occurs only for well-timed visual stimuli, is strongest when the visual stimulus leads the auditory stimulus by 20–80 ms, but is independent of the image structure in the visual stimulus. Similar visual modulation was found in the auditory core and belt. Our findings point to a very basic, stimulus-unspecific visual input to auditory cortex and clearly support the notion that early sensory cortices are susceptible to cross-modal interactions.
In particular, the finding that visual stimuli modulate the firing rates of individual neurons in auditory cortex suggests that the messages transmitted from these regions to higher processing stages not only reflect acoustical stimuli but also depend on their visual context.}, web_url = {http://www.sfn.org/am2007/}, event_name = {37th Annual Meeting of the Society for Neuroscience (Neuroscience 2007)}, event_place = {San Diego, CA, USA}, state = {published}, author = {Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Augath M{mark}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Poster{ 4586, title = {A voice-area in the primate brain: Enhanced representation of the "voice" of conspecifics}, year = {2007}, month = {9}, abstract = {The human voice not only transmits spoken language, but itself carries considerable meaning. Reflecting this importance, imaging studies have identified a region in the auditory cortex of the human brain that is sensitive to the human voice. For animals that cannot expand their vocal repertoire linguistically, the correct interpretation of the vocalizations of their conspecifics is of even greater importance for survival and social interactions. However, it is uncertain whether other primates share homologous voice regions or whether the human voice area is tightly linked to human language and thus unique. Here, we used high-resolution functional imaging (fMRI) of macaque monkeys to compare the strength of the activity response to conspecific vocalizations with that elicited by other sound categories, including the vocalizations of heterospecifics. We found several brain regions demonstrating a strong preference for conspecific vocalizations and identified a candidate ‘voice’ area located in the higher processing stages of auditory cortex, in the anterior portions of the superior-temporal plane (STP). In contrast, the corresponding well-known human voice area resides below the STP, highlighting the possibility of an evolutionary expansion and differentiation of the human auditory cortex away from the STP. The presence of a voice region in nonhuman primates supports the notion that such specialized areas do not depend on linguistic capabilities.
In all cases, our findings suggest that the auditory cortex of other vocal animals possesses regions that are specialized for processing the ‘voice’ of conspecifics.}, web_url = {http://www.dzg-ev.de/de/veranstaltungen/externe/eec2007.php}, event_name = {International Symposium on Evolution of Emotional Communication (EEC 2007)}, event_place = {Hannover, Germany}, state = {published}, author = {Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}; Whittingstall K{kevin}{Department Physiology of Cognitive Processes}; Steudel T{steudel}{Department Physiology of Cognitive Processes}; Augath M{mark}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Poster{ 4125, title = {Functional imaging of organization and specialization in the monkey auditory cortex}, journal = {Hearing Research}, year = {2007}, month = {7}, volume = {229}, number = {1-2}, pages = {239-240}, abstract = {We localized many fields in the auditory cortex of the macaque monkey and studied which auditory regions are specialized for processing the communication sounds of the species. First, we used high-resolution fMRI at 4.7 and 7 T to functionally map the auditory cortex of behaving and of anesthetized monkeys. The identified fields included regions already well described by anatomical and neurophysiological techniques as well as those whose anatomical parcellation remained without functional support. To localize fields, we varied the frequencies of tonal or bandpassed-noise sounds, and obtained spatially specific activity patterns throughout much of auditory cortex. We then statistically tested the frequency-selective gradients within these regions of auditory cortex and the results suggest that 11 fields contain neurons tuned for the frequency of sounds. The obtained maps provide functional support for a model according to which three fields in primary auditory cortex (the auditory ‘core’) are surrounded by eight neighboring ‘belt’ fields in non-primary auditory cortex. Following this non-invasive mapping, we examined which of the localized fields, if any, were specialized for processing the communication sounds of these species in relation to other sounds. Natural sounds were presented as stimulation, including the vocalizations of conspecifics, of other animals, and other natural sounds. Control stimuli were also used. The vocalizations of conspecifics generally elicited greater responses throughout auditory cortex than did the other sounds. The strongest specificity for these vocalizations seemed to be in the anterior fields of auditory cortex, but also extended anteriorly outside of the auditory core and belt fields that were localized with tone and noise stimuli. The data suggest a specialization for the processing of species-specific vocalizations in the anterior portions of auditory cortex, including the poorly understood fields of the auditory parabelt.
These fMRI data reflect ethological influences on brain organization and can help us to delineate neural networks in the nonhuman primate that are expected to have an evolutionary relationship to speech processing areas in the human brain.}, web_url = {http://www.sciencedirect.com/science/article/pii/S0378595506003108}, event_name = {2nd International Conference on Auditory Cortex 2006: The Listening Brain}, event_place = {Grantham, UK}, state = {published}, DOI = {10.1016/j.heares.2006.11.003}, author = {Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}; Augath M{mark}{Department Physiology of Cognitive Processes}; Steudel T{steudel}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Poster{ 4612, title = {Morphing macaque vocalizations for behavioral and neurophysiological study}, year = {2007}, month = {7}, volume = {2007}, abstract = {Morphing gradually from one stimulus to another is useful for studying whether subjects perceive stimulus transitions gradually or categorically. Signal processing algorithms are now available for morphing human speech sounds, which had been more difficult to achieve than for visual objects. These techniques would be useful to implement for preparing ethologically relevant stimuli for behavioral and neurophysiological study with animal models, like the macaque (Macaca) species of monkeys. However, morphing complex sounds requires developing methods for selecting landmarks to guide the morphing process, which is difficult for classes of vocalizations that differ on many acoustical parameters. We describe a procedure for using the freely available STRAIGHT signal processing package to morph between: 1) harmonic ‘coo’ vocalizations from two macaques, 2) different types of vocalizations from the same individual, a ‘coo’ and a ‘grunt’, and 3) monkey and human vocalizations. We evaluated the quality of the morphs and obtained classification curves from human listeners, who seemed to categorize the monkey vocalizations like those produced by humans. The procedure prepares macaque vocalizations for neuroethological study and the methods should also be useful for successful morphing between different classes of animal and human vocalizations.}, event_name = {Magnetresonanzzentrum Symposium 2007}, event_place = {Tübingen, Germany}, state = {published}, author = {Chakladar S{chakladar}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov C{chrisp}{Department Physiology of Cognitive Processes}} } @Poster{ 4855, title = {Multisensory Interactions in Auditory Cortex}, year = {2007}, month = {7}, volume = {10}, pages = {67}, abstract = {An increasing body of literature provides compelling evidence that sensory convergence not only occurs in higher association areas, but also in lower sensory regions and even in primary sensory cortices. To scrutinize these early cross-modal interactions, we use the macaque auditory cortex as a model and employ combinations of high-resolution functional imaging (fMRI) and electrophysiological recordings. Using functional imaging in alert and anaesthetized animals, we reported that (only) caudal auditory fields are susceptible to cross-modal modulation: the fMRI-BOLD response in these regions was enhanced when auditory stimuli were complemented by simultaneous visual or touch stimulation [1,2].
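STRAIGHT itself is a MATLAB package whose source-filter decomposition and landmark-guided interpolation go well beyond what can be shown here; purely as a toy illustration of the generic idea of acoustic morphing (linear interpolation between two magnitude spectra; emphatically not STRAIGHT's algorithm):

    # Toy sketch of spectral interpolation between two equal-length sounds.
    # This illustrates the generic morphing idea only, NOT STRAIGHT.
    import numpy as np

    def naive_morph(x, y, alpha):
        """Blend the magnitude spectra of x and y (0 <= alpha <= 1)."""
        X, Y = np.fft.rfft(x), np.fft.rfft(y)
        mag = (1 - alpha) * np.abs(X) + alpha * np.abs(Y)
        return np.fft.irfft(mag * np.exp(1j * np.angle(X)), n=len(x))

    fs = 16000
    t = np.arange(0, 0.25, 1 / fs)
    coo = np.sin(2 * np.pi * 400 * t)                   # stand-in 'coo'
    grunt = 0.5 * np.sign(np.sin(2 * np.pi * 120 * t))  # stand-in 'grunt'
    halfway = naive_morph(coo, grunt, 0.5)              # 50% morph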
To investigate the neuronal basis of this cross-modal enhancement, we recorded the activity of local field potentials and single units in alert animals watching complex audio-visual scenes. Our results show the following: visual stimuli by themselves, on average, do not drive auditory neurons, but cause responses in low-frequency LFPs. Combining visual and auditory stimuli leads to enhanced responses in the low-frequency LFP, but to a reduction of firing rates. This audio-visual interaction was significant at the population level, and for about 10% of the neurons when tested individually. The interaction occurs only for well-timed visual stimuli, is strongest when the visual stimulus leads the auditory stimulus by 20–80 ms, but is independent of the image structure in the visual stimulus. Similar visual modulation was found in the auditory core and belt. Our findings point to a very basic, stimulus-unspecific visual input to auditory cortex and clearly support the notion that early sensory cortices are susceptible to cross-modal interactions. In particular, the finding that visual stimuli modulate the firing rates of individual neurons in auditory cortex suggests that the messages transmitted from these regions to higher processing stages not only reflect acoustical stimuli but also depend on their visual context.}, web_url = {http://www.twk.tuebingen.mpg.de/twk07/abstract.php?_load_id=kayser01}, event_name = {10th Tübinger Wahrnehmungskonferenz (TWK 2007)}, event_place = {Tübingen, Germany}, state = {published}, author = {Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Petkov C{chrisp}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Poster{ KayserPAL2007, title = {Cross-modal integration of sensory information in auditory cortex}, journal = {Neuroforum}, year = {2007}, month = {4}, volume = {13}, number = {Supplement}, pages = {835}, abstract = {Traditionally, it is assumed that information from different sensory systems merges in higher association cortices. Contrasting this belief, we demonstrate cross-modal integration in primary and secondary auditory cortex. Using a combination of high-resolution functional magnetic resonance imaging (fMRI) and electrophysiological recordings in macaque monkeys, we quantify the integration of visual and tactile stimulation with auditory processing. Integration manifests as enhancement of activity that exceeds a simple linear superposition of responses, i.e. auditory activity is enhanced by the simultaneous presentation of non-auditory stimuli. Audio-somatosensory integration is reliably found at the caudal end and along the lateral side of the secondary auditory cortex. Regions with significant integration respond to auditory stimulation, but only a few respond to somatosensory stimulation. Yet, combining both stimuli significantly enhances responses. This enhancement obeys the classical rules for cross-modal integration: it occurs only for temporally coincident stimuli and follows the principle of inverse effectiveness; integration is stronger for less effective stimuli. Audio-visual integration is similarly found along the caudal end of the temporal plane in secondary auditory cortex, but also extends into primary auditory fields. Complementing these results from functional imaging, enhancement of neuronal activity is found in electrophysiological recordings of single neuron and population responses.
Hence, we conclude that cross-modal integration can occur very early in the processing hierarchy - at the earliest stage of auditory processing in the cortex. Further, this multisensory integration occurs pre-attentively, as demonstrated in anaesthetized animals. Such early integration might be necessary for quick and consistent interpretation of our world and might explain multisensory illusions where a stimulus perceived by one modality is altered by a stimulus in another modality.}, web_url = {http://nwg.glia.mdc-berlin.de/media/pdf/conference/Proceedings-Goettingen2007.pdf}, event_name = {7th Meeting of the German Neuroscience Society, 31st Göttingen Neurobiology Conference}, event_place = {Göttingen, Germany}, state = {published}, author = {Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Petkov C{chrisp}{Department Physiology of Cognitive Processes}; Augath M{mark}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Poster{ 4126, title = {Organization and specialization of the monkey auditory cortex revealed with MR imaging}, year = {2006}, month = {10}, day = {13}, web_url = {http://www.apan.jhu.edu/Program_APANIV.htm}, event_name = {Tucker-Davis Symposium on Advances and Perspectives in Auditory Neurophysiology (APAN IV)}, event_place = {Atlanta, GA, USA}, state = {published}, author = {Petkov C{chrisp}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}; Augath M{mark}{Department Physiology of Cognitive Processes}; Steudel T{steudel}{Department Physiology of Cognitive Processes}; Logothetis N{nikos}{Department Physiology of Cognitive Processes}} } @Poster{ 4123, title = {Functional imaging of organization and specialization in the monkey auditory cortex}, year = {2006}, month = {10}, volume = {36}, number = {344.9}, abstract = {We localized numerous fields in the auditory cortex of the macaque monkey and studied which regions are specialized for processing the communication sounds of the species. First, we used high-resolution fMRI at 4.7 and 7 Tesla to functionally map the auditory cortex of behaving and of anesthetized monkeys. The identified fields included regions already well described by anatomical and neurophysiological techniques as well as those whose anatomical parcellation remained without functional support. To localize fields, we varied the frequency content of tonal or band-passed-noise sounds, and obtained spatially specific activity patterns throughout much of auditory cortex. We then statistically tested the frequency-selective gradients within these regions of auditory cortex, and the results suggest that 11 fields contain neurons tuned to the frequency of sounds. The obtained maps provide functional support for a model according to which three fields in primary auditory cortex (the auditory ‘core’) are surrounded by eight neighboring ‘belt’ fields in non-primary auditory cortex. Following this non-invasive mapping, we examined which of the localized fields, if any, were specialized for processing the communication sounds of these species in relation to other sounds. Natural sounds were presented as stimuli, including the vocalizations of conspecifics, of other animals, and other natural sounds. Control stimuli were also used. The vocalizations of conspecifics generally elicited greater responses throughout auditory cortex than did the other sounds.
The strongest specificity for these vocalizations appeared to be in the anterior fields of auditory cortex, but it also extended anteriorly beyond the auditory core and belt fields that were localized with tone and noise stimuli. The data suggest a specialization for the processing of species-specific vocalizations in the anterior portions of auditory cortex, including the poorly understood fields of the auditory parabelt. These fMRI data reflect ethological influences on brain organization and can help us to delineate neural networks in the nonhuman primate that are expected to have an evolutionary relationship to speech processing areas in the human brain.}, file_url = {/fileadmin/user_upload/files/publications/Petkov_et_al_SFN%20Abstract%2006_final_4123[0].pdf}, web_url = {http://www.sfn.org/index.aspx?pagename=abstracts_ampublications}, event_name = {36th Annual Meeting of the Society for Neuroscience (Neuroscience 2006)}, event_place = {Atlanta, GA, USA}, state = {published}, author = {Petkov C{chrisp}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}; Augath M{mark}{Department Physiology of Cognitive Processes}; Steudel T{steudel}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Poster{ 4124, title = {Functional imaging of sensory integration in auditory cortex}, year = {2006}, month = {6}, event_name = {6th Congress of the Federation of European Psychophysiology Societies (FEPS 2006)}, event_place = {Budapest, Hungary}, state = {published}, author = {Kayser C{kayser}{Department Physiology of Cognitive Processes}; Petkov C{chrisp}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Poster{ 4122, title = {Integration of sensory information in auditory cortex}, year = {2006}, month = {6}, pages = {57}, abstract = {Traditionally it is assumed that information from different senses is integrated only in higher association cortices. Contrasting this belief, we demonstrate multisensory integration in areas proximal to primary sensory areas - in the so-called auditory belt. Using a combination of high-resolution functional magnetic resonance imaging (fMRI) and electrophysiological recordings in macaque monkeys, we quantify the integration of audio-visual and audio-tactile stimulation. Integration of auditory noise with tactile stimulation of the hand is reliably found in anaesthetized animals at the posterior end and along the lateral side of the auditory belt. This integration occurs only for temporally coincident stimuli and obeys the principle of inverse effectiveness: integration is stronger for less effective stimuli. Locations with significant integration responded to auditory-alone stimulation, but only a few responded to tactile-alone stimulation. Combining visual and auditory stimulation, robust multisensory integration in auditory cortex was found in alert animals, but was weaker in anaesthetized animals. Similar to audio-tactile integration, the audio-visual interaction was found in areas of the belt. Together, our results suggest that touch- and vision-related activity in auditory cortex arise from different sets of projections: touch-related information arrives as feed-forward input, whereas vision-related input arrives in a top-down fashion. Our findings demonstrate that multisensory integration can occur early in the processing hierarchy - one processing stage above primary auditory cortex.
Further, this multisensory integration occurs pre-attentively, as demonstrated in anaesthetized animals. Such early integration might be necessary for quick and consistent interpretation of our world and might explain multisensory illusions where a stimulus perceived by one modality is altered by a stimulus in another modality.}, web_url = {http://www.areadne.org/2006/}, event_name = {AREADNE 2006: Research in Encoding and Decoding of Neural Ensembles}, event_place = {Santorini, Greece}, state = {published}, author = {Kayser C{kayser}{Department Physiology of Cognitive Processes}; Petkov C{chrisp}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Poster{ 3539, title = {fMRI of Macaque Auditory Cortex in Awake and in Anesthetized Animals}, year = {2005}, month = {11}, volume = {35}, number = {851.5}, abstract = {Functional magnetic resonance imaging (fMRI) with non-human primates is invaluable because localized patterns of activity can guide subsequent neurophysiological recordings. However, it is unknown whether fMRI of the macaque monkey can reveal reliable auditory activations consistent with known properties of primate auditory cortical fields (ACFs). We used high-field (4.7- and 7-Tesla) fMRI to image the blood-oxygen-level-dependent (BOLD) response of auditory cortex in awake and in anesthetized macaques. For awake-animal imaging we trained a macaque to complete long-duration trials of visual fixation with minimal body movement. Scanning this animal at 7T during sound presentation revealed robust activity over auditory cortex in the superior temporal plane. A paradigm in which stimulation alternated with image acquisition revealed greater auditory activity than continuous imaging, where sound stimulation must compete with the scanner noise. Imaging data with more extensive sound stimulation were obtained from anesthetized animals, since these experiments allow quicker data acquisition. Here, we used sounds varying in center frequency and bandwidth, as have neurophysiological experiments mapping the basic organizational properties of macaque ACFs. In the antero-posterior direction, regions within the lateral sulcus were selective for sounds with low and high center frequencies, revealing the expected frequency-selective gradients (tonotopy) with multiple mirror reversals of these gradients. In comparison to tonal stimulation, sounds with greater spectral bandwidth activated more lateral and medial portions of the superior temporal plane, consistent with this activity occurring over non-primary ACFs.
In summary, high-field fMRI reveals the global organization of macaque auditory cortex and will be important for understanding how the primate auditory cortex is functionally organized.}, web_url = {http://www.sfn.org/absarchive/}, event_name = {35th Annual Meeting of the Society for Neuroscience (Neuroscience 2005)}, event_place = {Washington, DC, USA}, state = {published}, author = {Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}; Augath M{mark}{Department Physiology of Cognitive Processes}; Steudel T{steudel}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Poster{ 3549, title = {Integration of touch and sound in auditory cortex}, year = {2005}, month = {11}, volume = {35}, number = {388.6}, abstract = {Our different senses provide complementary views of the environment, and integrating information across senses is necessary for disambiguating sensory objects and for reliable interaction with them. It is commonly supposed that multisensory information is integrated only by higher cortical association areas. Contrasting this belief, we demonstrate multisensory integration in areas proximal to primary sensory areas - in the so-called auditory belt. Using functional magnetic resonance imaging (fMRI) of macaque monkeys, we quantified the integration of simultaneous audio-visual and audio-tactile stimulation in anaesthetized animals at 4.7 Tesla. Technically, integration was assumed if the response to the combined stimulus was stronger than the sum of the responses to the individual stimuli. Integration of auditory broad-band noise with tactile stimulation of the hands and foot was found at the posterior end and along the lateral side of the auditory belt in six animals. This integration occurred only for temporally coincident stimuli and obeyed the principle of inverse effectiveness: integration was stronger for less effective stimuli. Voxels with significant integration responded to auditory-alone stimulation, but only a few responded to tactile-alone stimulation. Combining visual and auditory stimulation in different paradigms, we could not find robust multisensory integration in auditory cortex. Further, audio-tactile integration was mostly limited to auditory cortex and much weaker in nearby ‘multimodal’ areas such as the claustrum. Our findings demonstrate that multisensory integration can occur early in the processing hierarchy - one processing stage above primary auditory cortex. Further, this multisensory integration occurred pre-attentively, as demonstrated in anaesthetized animals.
Such early integration might be necessary for quick and consistent interpretation of our world and might explain multisensory ‘illusions’ where a stimulus perceived by one modality is altered by a stimulus in another modality.}, web_url = {http://www.sfn.org/absarchive/}, event_name = {35th Annual Meeting of the Society for Neuroscience (Neuroscience 2005)}, event_place = {Washington, DC, USA}, state = {published}, author = {Kayser C{kayser}{Department Physiology of Cognitive Processes}; Petkov C{chrisp}{Department Physiology of Cognitive Processes}; Augath M{mark}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} } @Poster{ 3206, title = {Scene analysis in macaque primary auditory cortical (A1) neurons: perceptual fill-in and masking}, year = {2003}, month = {11}, volume = {33}, number = {182.21}, abstract = {Auditory fill-in (FI) is the perceptual completion of a sound that is interrupted by another sound. An illusion can be created when a discontinuity or a silent gap is introduced into a sound and a temporally coincident (interrupting) noise is superimposed on this gap. Under these conditions the sound is heard as continuing through the noise even though part of the sound was deleted. We hypothesized that the responses of A1 neurons to discontinuous tones interrupted by noise would be similar to the responses to pure tones (i.e., a correlate of FI). In contrast, we hypothesized that these neurons’ responses to a discontinuous tone presented with noise temporally surrounding the entire tone (masking noise) would be similar to responses to the noise by itself (i.e., a correlate of masking where one would only be able to hear the noise). Extracellular recordings were obtained from A1 neurons in two macaques. Most neurons could easily distinguish discontinuous from continuous tones in analyses based on signal detection theory. Although noise reduced the ability to detect the discontinuity (gap), neurons tended to be more sensitive in detecting the gap in the tone with masking than with interrupting noise, consistent with the psychophysical literature comparing masking and fill-in. Moreover, the response of many neurons to interrupting noise centered over the gap was quantitatively similar to responses to a complete tone, suggesting a neural correlate of FI. Conversely, neural responses to a gap in the tone with masking noise were similar to the response to the noise presented in isolation, as if being masked. We conclude that primate A1 is involved in the analysis of noisy auditory scenes and has responses that represent both the background noise and the illusory segment of the foreground tone.}, web_url = {http://www.sfn.org/index.aspx?pagename=annualmeeting_futureandpast}, event_name = {33rd Annual Meeting of the Society for Neuroscience (Neuroscience 2003)}, event_place = {New Orleans, LA, USA}, state = {published}, author = {Petkov C{chrisp}; O'Connor KN; Sutter ML} } @Conference{ Perrodin2014, title = {Auditory and audiovisual specificity for processing communication signals in the superior temporal lobe}, year = {2014}, month = {6}, day = {13}, pages = {28}, abstract = {Effective social interactions can depend upon the receiver combining vocal and facial content to form a coherent audiovisual representation of communication signals. Neuroimaging studies have identified face- or voice-sensitive areas in the primate temporal lobe, some of which have been proposed as candidate regions for face-voice integration.
However, neurons in these areas have so far been studied primarily in their respective sensory modality. In addition, these higher-level sensory areas are typically not prominent in current models of multisensory processing, unlike early sensory and association cortices. Thus, it was unclear how audiovisual influences occur at the neuronal level within such regions, especially in comparison to classically defined multisensory regions in temporal association cortex. Here I will present data, obtained using extracellular recordings targeting a voice-sensitive region of the anterior supratemporal plane and the neighboring superior-temporal sulcus (STS) in awake rhesus macaques, that explore auditory (voice) and visual (face) influences on neuronal responses to vocalizations. Our findings suggest that within the superior temporal lobe, neurons in voice-sensitive cortex specialize in the auditory analysis of vocal features, while congruency-sensitive visual influences emerge to a greater extent in STS neurons. These results help clarify the audiovisual representation of communication signals at two stages of the sensory pathway in primate superior temporal regions, and are consistent with reversed gradients of functional specificity in unisensory vs. multisensory processing along their respective hierarchies.}, web_url = {http://uvtapp.uvt.nl/fsw/spits.ws.frmShowpage?v_page_id=3859096609314761}, event_name = {15th International Multisensory Research Forum (IMRF 2014)}, event_place = {Amsterdam, The Netherlands}, state = {published}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}} } @Conference{ KikuchiAMWP2013, title = {Cortical oscillations and spiking activity associated with Artificial Grammar Learning in the monkey auditory cortex}, year = {2013}, month = {11}, day = {8}, web_url = {http://www.med.upenn.edu/apan/assets/user-content/documents/Archived.APAN.2013.pdf}, event_name = {Tucker‐Davis Technologies Symposium on Advances and Perspectives in Auditory Neurophysiology (APAN 2013)}, event_place = {San Diego, CA, USA}, state = {published}, author = {Kikuchi Y; Attaheri A; Milne A; Wilson B; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Conference{ WilsonCSHSMP2011, title = {Behavioural and functional imaging analysis of "artificial-grammar" sequence learning in Rhesus macaques}, year = {2011}, month = {11}, day = {11}, web_url = {http://www.apan.jhu.edu/APAN2011_Program.pdf}, event_name = {Tucker-Davis Symposium on Advances and Perspectives in Auditory Neurophysiology (APAN 2011)}, event_place = {Washington, DC, USA}, state = {published}, author = {Wilson B; Collison MG; Slater H; Hunter DM; Smith K; Marslen-Wilson W; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Conference{ PerrodinKLP2011_2, title = {Voice cells in the primate temporal lobe}, year = {2011}, month = {10}, volume = {12}, pages = {14}, abstract = {Communication signals are important for social interactions and survival and are thought to receive specialized processing in the visual and auditory systems. Whereas the neural processing of faces by face clusters and face cells has been repeatedly studied, less is known about the neural representation of voice content.
Recent functional magnetic resonance imaging (fMRI) studies have localized voice-preferring regions in the primate temporal lobe, but hemodynamic responses cannot directly reveal the underlying neurophysiological properties. We investigated the responses of neurons in an fMRI-identified voice cluster in awake monkeys, and here we provide the first systematic evidence for voice cells. "Voice cells" were identified, in analogy to "face cells", as neurons responding at least 2-fold more strongly to conspecific voices than to "nonvoice" sounds or heterospecific voices. Importantly, whereas face clusters are thought to contain high proportions of face cells responding broadly to many faces, we found that voice clusters contain moderate proportions of voice cells. Furthermore, individual voice cells exhibit high stimulus selectivity. The results reveal the neurophysiological bases for fMRI-defined voice clusters in the primate brain and highlight potential differences in how the auditory and visual systems generate selective representations of communication signals.}, event_name = {12th Conference of Junior Neuroscientists of Tübingen (NeNA 2011)}, event_place = {Heiligkreuztal, Germany}, state = {published}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Conference{ PerrodinKLP2011_4, title = {Voice-sensitive neurons in the primate brain}, year = {2011}, month = {9}, pages = {60}, abstract = {The brain is thought to generate selective and efficient representations of important sensory events such as communicative signals, yet the various sensory systems might instantiate such selective representations in different ways. Since the 1980s, the processing of facial information by ‘face’ cells has been repeatedly studied. Although auditory ‘voice’ regions showing a strong fMRI activity preference for the voice of conspecific individuals have now been identified in humans and monkeys, the fMRI signal cannot specify the encoding properties of the underlying neurons or whether fMRI voice-preferring clusters contain ‘voice cells’. We investigated the responses of neurons in an fMRI-identified voice cluster in awake macaque monkeys and provide the first systematic evidence for voice cells, defined, in analogy to face cells, as neurons responding at least two-fold more strongly to conspecific voices than to heterospecific animal voices or natural/environmental sounds. Surprisingly, whereas face clusters contain high proportions of face-preferring cells that respond broadly to many faces, we found a considerable yet, by comparison, moderate proportion of voice-preferring cells that exhibited a sparse-coding strategy for voices. The observed selective representation for individual voices might stem from the different evolutionary pressures that would have affected how the auditory system has specialized relative to the visual system.
In all cases, our results highlight the interesting processing strategies used by the primate brain to encode auditory and visual components of communication signals.}, web_url = {https://www.unil.ch/ln/files/live/sites/ln/files/shared/FENS-IBRO_2011_booklet.pdf}, event_name = {FENS‐IBRO Training Center: Imaging Brain Function in Animals and Humans}, event_place = {Lausanne, Switzerland}, state = {published}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Conference{ 7057, title = {A brain region consisting of neurons with moderate sensitivity for voices}, year = {2010}, month = {11}, volume = {40}, number = {125.3}, abstract = {A region of "voice" clusters has recently been identified in the macaque auditory cortex with functional magnetic-resonance imaging (fMRI). These clusters show a strong fMRI activity preference for the voice of conspecifics and appear to correspond functionally to those of the known human voice region. In the visual system, fMRI has been used to guide electrophysiological recordings from neurons in the monkey brain that were shown to be highly selective for faces [1]. We investigated whether fMRI-guided electrophysiology would reveal comparable levels of selectivity in one of the recently identified monkey voice clusters [2]. During fMRI acquisition and electrophysiological recordings, three categories of 12 sounds were used for stimulation: macaque vocalizations (MVocs), other animal vocalizations (AVocs), and natural sounds (NSnds). The sound categories were comparable in their low-level acoustical features, having been selected for this purpose from a large set of sounds. We first used the stimuli during fMRI, as we have previously done, to identify the clusters with a strong activity preference for MVocs. Then electrophysiological responses to the auditory stimuli were recorded from the anterior voice cluster in two awake macaques (total of 193 responsive single- and multi-units, from 125 sites). Both monkeys showed moderate neuronal response preferences for MVocs over the other sound categories (respectively, 41% and 29% preference for MVocs in the unit activity of each animal), even when the analysis focused on the focal cluster in each animal with maximal selectivity for MVocs (respectively, 72% and 73% preference for MVocs). Our results suggest that a strong fMRI activity preference need not result from a large proportion of highly selective neurons. This holds even though methodological differences may partly account for the difference in neuronal selectivity between our study and the previous macaque work on face processing, which reported 96% and 84% selectivity for faces in two animals [1]. In all cases, our results may reflect evolutionary differences that have affected voice and face selectivity. Namely, the visual system appears to have specialized during vertebrate evolution to represent canonical facial features (e.g., two eyes, a nose and a mouth).
By contrast, the auditory system could have had less opportunity to specialize, given that many animals modify the acoustics of their vocalizations to be distinct from those of other animals and to circumvent environmental noise.}, file_url = {fileadmin/user_upload/files/publications/2011/Neuroscience-2010-Perrodin.pdf}, web_url = {http://www.sfn.org/am2010/index.aspx?pagename=abstracts_main}, event_name = {40th Annual Meeting of the Society for Neuroscience (Neuroscience 2010)}, event_place = {San Diego, CA, USA}, state = {published}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Conference{ 7058, title = {Visual influences on voice-sensitive neurons}, year = {2010}, month = {11}, volume = {40}, number = {125.4}, abstract = {Many animals depend upon vocal and facial communication signals for survival and social interactions, but it remains unclear how voices and faces are integrated by the brain. Most studies have evaluated the unisensory processing of either vocal or facial information in brain regions thought to be "voice" or "face" sensitive. Other studies have described multisensory interactions in the brain for voices and faces, but only for a few brain regions, such as those close to the primary auditory cortex or in the prefrontal cortex. This work aims to address whether the responses of neurons in a voice-sensitive brain region, which was recently identified in monkeys with functional MRI, are influenced by faces. Extracellular recordings were conducted in two awake rhesus macaques. We targeted the anterior voice-sensitive cluster on the superior temporal plane, which was first localized for each animal with fMRI [please see the linked presentation] and resides ~5 mm anterior to the tonotopically organized field RT. For stimulation we used movies of vocalizing monkeys and humans that were matched in their low-level auditory and visual features. These dynamic face and voice stimuli were presented in auditory-only, visual-only or audio-visual stimulation conditions. Recordings yielded responses from a total of 318 local-field potential (LFP) sites and 208 single- and multi-units. Significant multisensory interactions were observed in 70% of the LFP sites and in 33% of the single- and multi-unit responses. We observed both suppression and enhancement of the neuronal responses to the audio-visual condition compared to the auditory condition, as previously noted for neurons in other brain regions. Notably, human voices were as effective in driving the neuronal responses as were the monkey voices and elicited similar audiovisual interactions, questioning the species-specificity of the voice-sensitive regions. Our results provide evidence for visual influences in what has been characterized as an auditory "voice" region.
This suggests that, rather than conducting strictly unisensory processing, neurons in the voice region (and potentially also the face region) form an integral part of a network engaged in the processing of communication signals from the different sensory modalities.}, file_url = {fileadmin/user_upload/files/publications/2011/Neuroscience-2010-Perrodin-2.pdf}, web_url = {http://www.sfn.org/am2010/index.aspx?pagename=abstracts_main}, event_name = {40th Annual Meeting of the Society for Neuroscience (Neuroscience 2010)}, event_place = {San Diego, CA, USA}, state = {published}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov CI{chrisp}{Department Physiology of Cognitive Processes}} } @Conference{ 5962, title = {Visual influences on voice-selective neurons in the anterior superior-temporal plane}, year = {2009}, month = {6}, day = {30}, volume = {10}, number = {539}, abstract = {For social interaction and survival, primates rely heavily on vocal and facial communication signals from their conspecifics. To date, many studies have evaluated the unisensory representations of either vocal or facial information in regions thought to be “voice” or “face” selective. Other studies have directly evaluated the multisensory interactions of voices and faces but have focused on posterior auditory regions closer to the primary auditory cortex. This work investigates multisensory interactions at the neuronal level in an auditory region in the anterior superior temporal plane, which contains one of the important regions for processing “voice”-related information. Extracellular recordings were obtained from the auditory cortex of macaque monkeys, targeting an anterior “voice” region that we have previously described with functional magnetic resonance imaging (fMRI). For stimulation we used movies of vocalizing monkeys and humans which we matched in their low-level auditory and visual features. These dynamic face and voice stimuli allowed us to evaluate how neurons responded to auditory, visual or audio-visual components of the stimuli. Our experiments also contained control conditions consisting of several mismatched audiovisual stimulus combinations, such as 1) a voice matched to a face from a different species, 2) adding a temporal delay in the visual component of the stimulus, or 3) using an acoustically manipulated voice with the original facial stimulus. Our neuronal recordings identified a clustered population of voice-selective sites in the anterior superior temporal plane, ~5 mm anterior to field RT. A significant visual influence of the dynamic faces on the corresponding (“matched”) vocalizations was observed in both the local-field potential (LFP) and the spiking activity (analog multiunit activity, AMUA): 38% of the sites showed audiovisual interactions in the LFP signals, and 60% in the AMUA. In addition, the multisensory influence was significantly stronger for the matching voice and face stimuli than for any of the incongruent (“mismatched”) control conditions, confirming the specificity of the cross-sensory influence on the neuronal activity. Our results provide evidence for visual influences in what has been characterized as an auditory ‘voice’ area.
This visual modulation was specific to behaviorally relevant voice-face associations and demonstrates that the processing of voice-related information in higher auditory regions can be influenced by multisensory input.}, web_url = {http://imrf.mcmaster.ca/IMRF/ocs/index.php/meetings/2009/paper/view/539}, event_name = {10th International Multisensory Research Forum (IMRF 2009)}, event_place = {New York City, USA}, state = {published}, author = {Perrodin C{cperrodin}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Petkov C{chrisp}{Department Physiology of Cognitive Processes}} } @Conference{ PetkovKASL2005, title = {High-Field fMRI Reveals Auditory Cortical Fields in the Macaque Monkey}, year = {2005}, month = {11}, day = {11}, web_url = {http://www.apan.jhu.edu/Program_APANIII.htm}, event_name = {Tucker-Davis Symposium on Advances and Perspectives in Auditory Neurophysiology (APAN III)}, event_place = {Washington, DC, USA}, state = {published}, author = {Petkov CI{chrisp}{Department Physiology of Cognitive Processes}; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}; Augath M{mark}{Department Physiology of Cognitive Processes}; Steudel T{steudel}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}} }
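A note on the integration criterion used in several of the multisensory abstracts above (entries 4855, 4122, and 3549): integration was assumed if the response to the combined stimulus exceeded the sum of the responses to the individual stimuli (superadditivity), and it was stronger for less effective stimuli (inverse effectiveness). The following is a minimal Python sketch of that criterion, assuming trial-wise response amplitudes (e.g., BOLD amplitudes or firing rates); the function names and toy numbers are illustrative and are not taken from any of the cited studies.

import numpy as np

def superadditive(resp_a, resp_b, resp_ab):
    # True if the mean combined response exceeds the sum of the
    # mean unisensory responses (the criterion stated in the abstracts).
    return float(np.mean(resp_ab)) > float(np.mean(resp_a)) + float(np.mean(resp_b))

def integration_index(resp_a, resp_b, resp_ab):
    # Amount by which the combined response exceeds the additive
    # prediction; positive values indicate superadditive integration.
    return float(np.mean(resp_ab) - (np.mean(resp_a) + np.mean(resp_b)))

# Toy example: a unit whose combined audio-tactile response is superadditive.
rng = np.random.default_rng(1)
aud = rng.normal(1.0, 0.2, 50)   # auditory-alone responses, 50 trials
tac = rng.normal(0.1, 0.2, 50)   # tactile-alone responses
both = rng.normal(1.5, 0.2, 50)  # combined audio-tactile responses
print(superadditive(aud, tac, both), integration_index(aud, tac, both))

Testing the same index separately for strong and weak unisensory stimuli would, under this formulation, provide a simple check of inverse effectiveness: the index should be larger for the less effective stimulus pair.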