@article{SchindlerB2016_2,
  author   = {Schindler, A. and Bartels, A.},
  title    = {Connectivity Reveals Sources of Predictive Coding Signals in Early Visual Cortex during Processing of Visual Optic Flow},
  journal  = {Cerebral Cortex},
  year     = {2017},
  month    = may,
  volume   = {27},
  number   = {5},
  pages    = {2885--2893},
  doi      = {10.1093/cercor/bhw136},
  url      = {https://academic.oup.com/cercor/article-pdf/27/5/2885/14142141/bhw136.pdf},
  state    = {published},
  abstract = {Superimposed on the visual feed-forward pathway, feedback connections convey higher level information to cortical areas lower in the hierarchy. A prominent framework for these connections is the theory of predictive coding where high-level areas send stimulus interpretations to lower level areas that compare them with sensory input. Along these lines, a growing body of neuroimaging studies shows that predictable stimuli lead to reduced blood oxygen level-dependent (BOLD) responses compared with matched nonpredictable counterparts, especially in early visual cortex (EVC) including areas V1–V3. The sources of these modulatory feedback signals are largely unknown. Here, we re-examined the robust finding of relative BOLD suppression in EVC evident during processing of coherent compared with random motion. Using functional connectivity analysis, we show an optic flow-dependent increase of functional connectivity between BOLD suppressed EVC and a network of visual motion areas including MST, V3A, V6, the cingulate sulcus visual area (CSv), and precuneus (Pc). Connectivity decreased between EVC and 2 areas known to encode heading direction: entorhinal cortex (EC) and retrosplenial cortex (RSC). Our results provide first evidence that BOLD suppression in EVC for predictable stimuli is indeed mediated by specific high-level areas, in accord with the theory of predictive coding.},
}

@article{SchindlerB2016,
  author   = {Schindler, A. and Bartels, A.},
  title    = {Visual high-level regions respond to high-level stimulus content in the absence of low-level confounds},
  journal  = {NeuroImage},
  year     = {2016},
  month    = may,
  volume   = {132},
  pages    = {520--525},
  doi      = {10.1016/j.neuroimage.2016.03.011},
  url      = {http://www.sciencedirect.com/science/article/pii/S105381191600207X},
  state    = {published},
  abstract = {High-level regions of the ventral stream exhibit strong category selectivity to stimuli such as faces, houses, or objects. However, recent studies suggest that at least part of this selectivity stems from low-level differences inherent to images of the different categories. For example, visual outdoor and indoor scenes as well as houses differ in spatial frequency, rectilinearity and obliqueness when compared to face or object images. Correspondingly, scene responsive para-hippocampal place area (PPA) showed strong preference to low-level properties of visual scenes also in the absence of high-level scene content. This raises the question whether all high-level responses in PPA, the fusiform face area (FFA), or the object-responsive lateral occipital complex (LOC) may actually be explained by systematic differences in low-level features. In the present study we contrasted two classes of simple stimuli consisting of ten rectangles each. While both were matched in visual low-level features only one class of rectangle arrangements gave rise to a percept compatible with a high-level 3D layout such as a scene or an object. We found that areas PPA, transverse occipital sulcus (TOS, also referred to as occipital place area, OPA), as well as FFA and LOC showed robust responses to the visual scene class compared to the low-level matched control. Our results suggest that visual category responsive regions are not purely driven by low-level visual features but also by the high-level perceptual stimulus interpretation.},
}

@article{SchindlerB2015,
  author   = {Schindler, A. and Bartels, A.},
  title    = {Motion parallax links visual motion areas and scene regions},
  journal  = {NeuroImage},
  year     = {2016},
  month    = jan,
  volume   = {125},
  pages    = {803--812},
  doi      = {10.1016/j.neuroimage.2015.10.066},
  url      = {http://www.sciencedirect.com/science/article/pii/S1053811915009830},
  state    = {published},
  abstract = {When we move, the retinal velocities of objects in our surrounding differ according to their relative distances and give rise to a powerful three-dimensional visual cue referred to as motion parallax. Motion parallax allows us to infer our surrounding's 3D structure as well as self-motion based on 2D retinal information. However, the neural substrates mediating the link between visual motion and scene processing are largely unexplored. We used fMRI in human observers to study motion parallax by means of an ecologically relevant yet highly controlled stimulus that mimicked the observer's lateral motion past a depth-layered scene. We found parallax selective responses in parietal regions IPS3 and IPS4, and in a region lateral to scene selective occipital place area (OPA). The traditionally defined scene responsive regions OPA, the para-hippocampal place area (PPA) and the retrosplenial cortex (RSC) did not respond to parallax. During parallax processing, the occipital parallax selective region entertained highly specific functional connectivity with IPS3 and with scene selective PPA. These results establish a network linking dorsal motion and ventral scene processing regions specifically during parallax processing, which may underlie the brain's ability to derive 3D scene information from motion parallax.},
}

@article{SchindlerHB2012,
  author   = {Schindler, A. and Herdener, M. and Bartels, A.},
  title    = {Coding of Melodic {Gestalt} in Human Auditory Cortex},
  journal  = {Cerebral Cortex},
  year     = {2013},
  month    = dec,
  volume   = {23},
  number   = {12},
  pages    = {2987--2993},
  doi      = {10.1093/cercor/bhs289},
  url      = {http://cercor.oxfordjournals.org/content/23/12/2987},
  state    = {published},
  abstract = {The perception of a melody is invariant to the absolute properties of its constituting notes, but depends on the relation between them—the melody's relative pitch profile. In fact, a melody's “Gestalt” is recognized regardless of the instrument or key used to play it. Pitch processing in general is assumed to occur at the level of the auditory cortex. However, it is unknown whether early auditory regions are able to encode pitch sequences integrated over time (i.e., melodies) and whether the resulting representations are invariant to specific keys. Here, we presented participants different melodies composed of the same 4 harmonic pitches during functional magnetic resonance imaging recordings. Additionally, we played the same melodies transposed in different keys and on different instruments. We found that melodies were invariantly represented by their blood oxygen level–dependent activation patterns in primary and secondary auditory cortices across instruments, and also across keys. Our findings extend common hierarchical models of auditory processing by showing that melodies are encoded independent of absolute pitch and based on their relative pitch profile as early as the primary auditory cortex.},
}

@article{SchindlerB2012,
  author   = {Schindler, A. and Bartels, A.},
  title    = {Parietal Cortex Codes for Egocentric Space beyond the Field of View},
  journal  = {Current Biology},
  year     = {2013},
  month    = jan,
  volume   = {23},
  number   = {2},
  pages    = {177--182},
  doi      = {10.1016/j.cub.2012.11.060},
  url      = {http://www.sciencedirect.com/science/article/pii/S0960982212014406},
  state    = {published},
  abstract = {Our subjective experience links covert visual and egocentric spatial attention seamlessly. However, the latter can extend beyond the visual field, covering all directions relative to our body. In contrast to visual representations [1, 2, 3 and 4], little is known about unseen egocentric representations in the healthy brain. Parietal cortex appears to be involved in both, because lesions in it can lead to deficits in visual attention, but also to a disorder of egocentric spatial awareness, known as hemispatial neglect [5 and 6]. Here, we used a novel virtual reality paradigm to probe our participants’ egocentric surrounding during fMRI recordings. We found that egocentric unseen space was represented by patterns of voxel activity in parietal cortex, independent of visual information. Intriguingly, the best decoding performances corresponded to brain areas associated with visual covert attention and reaching, as well as to lesion sites associated with spatial neglect.},
}

@article{5283,
  author   = {Bartels, A. and Vazquez-Zuniga, Y. and Schindler, A. and Logothetis, N. K.},
  title    = {Rivalry between afterimages and real images: the influence of the percept and the eye},
  journal  = {Journal of Vision},
  year     = {2011},
  month    = aug,
  volume   = {11},
  number   = {9},
  eid      = {7},
  pages    = {1--13},
  doi      = {10.1167/11.9.7},
  url      = {http://www.journalofvision.org/content/11/9/7.full.pdf+html},
  state    = {published},
  abstract = {In binocular rivalry, the conscious percept alternates stochastically between two images shown to the two eyes. Both suppressed and dominant images form afterimages (AIs) whose strength depends on the perceptual state during induction. Counterintuitively, when these two AIs rival, the AI of the previously suppressed percept gains initial dominance, even when it is weaker. Here, we examined rivalry between afterimages, between real images, and between both to examine eye-based and binocular contributions to this effect. In all experiments, we found that for both AIs and real images, the suppressed percept consistently gained initial dominance following a long suppression period. Dominance reversals failed to occur following short suppression periods and depended on an abrupt change (removal) of the stimulus. With real images, results were replicated also when eye channels were exchanged during the abrupt change. The initial dominance of the weaker, previously suppressed percept is thus not due to its weaker contrast, to it being an afterimage, or to monocular adaptation effects as previously suggested. Instead, it is due to binocular, higher level effects that favor a perceptual switch after prolonged dominance. We discuss a plausible neural account for these findings in terms of neural interactions between binocular and eye-related stages.},
}

@article{SchwarzkopfSR2010,
  author   = {Schwarzkopf, D. S. and Schindler, A. and Rees, G.},
  title    = {Knowing with Which Eye We See: Utrocular Discrimination and Eye-Specific Signals in Human Visual Cortex},
  journal  = {PLoS ONE},
  year     = {2010},
  month    = oct,
  volume   = {5},
  number   = {10},
  eid      = {e13775},
  pages    = {1--8},
  doi      = {10.1371/journal.pone.0013775},
  url      = {http://journals.plos.org/plosone/article/asset?id=10.1371%2Fjournal.pone.0013775.PDF},
  state    = {published},
  abstract = {Neurophysiological and behavioral reports converge to suggest that monocular neurons in the primary visual cortex are biased toward low spatial frequencies, while binocular neurons favor high spatial frequencies. Here we tested this hypothesis with functional magnetic resonance imaging (fMRI). Human participants viewed flickering gratings at one of two spatial frequencies presented to either the left or the right eye, and judged which of the two eyes was being stimulated (utrocular discrimination). Using multivoxel pattern analysis we found that local spatial patterns of signals in primary visual cortex (V1) allowed successful decoding of the eye-of-origin. Decoding was above chance for low but not high spatial frequencies, confirming the presence of a bias reported by animal studies in human visual cortex. Behaviorally, we found that reliable judgment of the eye-of-origin did not depend on spatial frequency. We further analyzed the mean response in visual cortex to our stimuli and revealed a weak difference between left and right eye stimulation. Our results are thus consistent with the interpretation that participants use overall levels of neural activity in visual cortex, perhaps arising due to local luminance differences, to judge the eye-of-origin. Taken together, we show that it is possible to decode eye-specific voxel pattern information in visual cortex but, at least in healthy participants with normal binocular vision, these patterns are unrelated to awareness of which eye is being stimulated.},
}

@inproceedings{SchindlerB2016_3,
  author      = {Schindler, A. and Bartels, A.},
  title       = {Integration of visual and extra-retinal self-motion during voluntary head movements in the human brain},
  booktitle   = {46th Annual Meeting of the Society for Neuroscience (Neuroscience 2016)},
  year        = {2016},
  month       = nov,
  day         = {14},
  number      = {329.01},
  note        = {Poster},
  url         = {http://www.abstractsonline.com/pp8/index.html#!/4071/presentation/30214},
  event_place = {San Diego, CA, USA},
  state       = {published},
  abstract    = {Our phenomenological experience of the stable world is maintained due to continuous integration of visual self-motion with extra-retinal signals. This mechanism is not only essential for locomotion and navigation but also a crucial prerequisite for virtually any successful interaction with our environment. Constraints in fMRI acquisition methods previously prevented the study of neural processing associated to integration of visual signals with those related to head-movement. Here, we developed a novel and ecologically valid fMRI paradigm that enabled us to study integration of optic flow with extra-retinal heading signals while observers performed voluntary head movements. Our results provide first evidence for the multisensory integration of head-motion in human regions MST, VIP, the cingulate visual area (CSv) and a region in precuneus (Pc) that are known to process visual self-motion signals. In addition, we found multisensory heading integration in posterior insular cortex (PIC) that we suggest to be homolog to monkey visual posterior sylvian (VPS). In contrast, no integration was found in parieto-insular-vestibular cortex (PIVC). These results identify for the first time head-movement related integration of visual heading signals in the human brain, and identify a clear functional segregation of the human posterior insular cortex.},
}

@inproceedings{NauSB2015,
  author      = {Nau, M. and Schindler, A. and Bartels, A.},
  title       = {Human early visual cortex, {V3A}, {V6} and {VIP} signal the direction of retinal motion relative to the direction of eye movements},
  booktitle   = {Donders Discussions 2015},
  year        = {2015},
  month       = nov,
  day         = {5},
  pages       = {68},
  note        = {Poster},
  url         = {http://www.ru.nl/dondersdiscussions/previous-events/dd2015/programme2015/},
  event_place = {Nijmegen, The Netherlands},
  state       = {published},
  abstract    = {It is still not clear how the visual system compensates for self-induced visual motion. Doing so is crucial to convey visual stability and to recognize motion in the external world. Substantial evidence suggests that efference copies of eye movement commands are integrated with visual input, allowing to separate self-induced retinal motion from external objective motion. Here we used fMRI to investigate functional responses of sixteen visual areas to planar objective motion during pursuit. At two pursuit speeds, observers were exposed to objective motion that was faster, matched or slower relative to pursuit. We found that areas V3A, V6, VIP and the early visual cortex preferred objective motion faster than pursuit to objective motion slower than pursuit and thus signaled the direction of retinal motion relative to the direction of eye movements. Additionally, we examined functional connectivity between area V3A and the thalamus, which is known to contribute to cortico-cortical communication and the transmission of efference copies, and found a retinal motion dependent functional link between V3A and the dorsal thalamus. The present results emphasize the key role of area V3A in compensating self-induced visual motion and further point to an involvement of both early visual cortex and the thalamus.},
}

@inproceedings{SchindlerB2012_2,
  author      = {Schindler, A. and Bartels, A.},
  title       = {Parietal cortex codes for egocentric space beyond the field of view},
  booktitle   = {ERNI-HSF Science Meeting: Orienting of Attention: Neural Implementation, Underlying Mechanisms and Clinical Implications},
  year        = {2012},
  month       = nov,
  pages       = {50},
  note        = {Poster},
  url         = {http://www.danielabalslev.dk/workshop/Abstract_booklet.pdf},
  event_place = {Tübingen, Germany},
  state       = {published},
  abstract    = {Our subjective experience links covert visual- and egocentric spatial attention seamlessly. However, the latter can extend beyond the visual field, covering all directions relative to our body. In contrast to visual representations, only little is known about unseen egocentric representations in the healthy brain. Parietal cortex appears involved in both, as lesions in it can lead to deficits in visual attention, but also to a disorder of egocentric spatial awareness, known as hemi-spatial neglect. Here, we used a novel virtual reality paradigm to probe our participants’ egocentric surrounding during fMRI recordings. We found that egocentric unseen space was encoded by patterns of voxel activity in parietal cortex. Intriguingly, the brain regions with best decoding performances comprised two areas known to be involved in visual covert attention and reaching as well as a region in inferior parietal cortex that coincided with a lesion site associated with spatial neglect.},
}

@inproceedings{SchindlerHB2012_2,
  author      = {Schindler, A. and Herdener, M. and Bartels, A.},
  title       = {Coding of melodic {Gestalt} in human auditory cortex},
  booktitle   = {42nd Annual Meeting of the Society for Neuroscience (Neuroscience 2012)},
  year        = {2012},
  month       = oct,
  day         = {15},
  volume      = {42},
  number      = {462.09},
  note        = {Poster},
  url         = {http://www.abstractsonline.com/Plan/ViewAbstract.aspx?sKey=0f42e296-1480-4f47-95c9-e286ef58741f&cKey=56eb3b16-1831-48e4-9a55-3948b9e6c5c9&mKey=70007181-01c9-4de9-a0a2-eebfa14cd9f1},
  event_place = {New Orleans, LA, USA},
  state       = {published},
  abstract    = {A melody consists of a temporal sequence of pitches. Its ‘Gestalt’ is invariant to absolute pitch but depends on the relation between pitches – the relative pitch profile. Consequently, a melody can be recognised regardless of the instrument used to play it and it even retains its identity after transposition to a different key, which involves a global change of all pitches in the melodic sequence. In contrast, a change in a melody’s temporal pitch order is usually accompanied with a change in its relative pitch profile and therefore also affects its melodic ‘Gestalt’. Pitch processing is assumed to occur in the auditory cortex. It is however still unknown whether early auditory regions are capable of integrating pitches over time and whether the resulting representations are invariant with respect to the key of their presentation. Here, we exposed participants to different melodies composed of the same four harmonic pitches during fMRI recordings. Additionally, we presented the same melodies transposed to different keys or played on different instruments. We found that melodies were invariantly represented by their BOLD activation patterns in primary and secondary auditory cortices across instruments, and also across keys. Our findings extend common hierarchical models of auditory processing by showing that melodies are encoded independent of absolute pitch and based on their relative pitch profile as early as primary auditory cortex.},
}

@inproceedings{SchindlerKB2011,
  author      = {Schindler, A. and Kleiner, M. and Bartels, A.},
  title       = {Decoding egocentric space in human posterior parietal cortex using {fMRI}},
  booktitle   = {41st Annual Meeting of the Society for Neuroscience (Neuroscience 2011)},
  year        = {2011},
  month       = nov,
  volume      = {41},
  number      = {800.21},
  note        = {Poster},
  url         = {http://www.sfn.org/AM2011/},
  event_place = {Washington, DC, USA},
  state       = {published},
  abstract    = {In our subjective experience, there is a tight link between covert visual attention and ego-centric spatial attention. One key difference is that the latter can extend beyond the visual field, providing us with an accurate mental representation of an object’s location relative to our body position. A neural link between visual and ego-centric spatial attention is suggested by lesions in parietal cortex, that lead not only to deficits in covert visual attention, but frequently also to a disorder of ego-centric spatial awareness, known as hemi-spatial neglect. While parietal involvement in covert visual spatial attention has been much studied, relatively little is known about mental representations of the unseen space around us. In the present study we examined whether also unseen spatial locations beyond the visual field are represented in parietal activity, and how they are related to retinotopic representations. We employed a novel virtual reality (VR) paradigm during functional magnetic resonance imaging (fMRI), whereby observers were prompted to draw their spatial attention to the position of one of eight possible objects located around them in an octagonal room. By changing the observers’ facing direction every few trials, the egocentric location of objects was disentangled from their absolute position and from the objects’ identity. Thus, mental representations of egocentric space surrounding the observer were sampled eight-fold. De-coding results of a multivariate pattern analysis classifier (MVPA), but not univariate results, showed that egocentric spatial directions were specifically represented in parietal cortex. These representations overlapped only partly with visually driven retinotopic activity. Our results thus show that parietal cortex codes not only for retinotopic and visually accessible space, but also for egocentric locations of the three-dimensional space surrounding us, including unseen space.},
}

@inproceedings{SchindlerKB2011_2,
  author      = {Schindler, A. and Kleiner, M. and Bartels, A.},
  title       = {Decoding Egocentric Space in human Posterior Parietal Cortex using {fMRI}},
  booktitle   = {12th Conference of Junior Neuroscientists of Tübingen (NeNA 2011)},
  year        = {2011},
  month       = oct,
  volume      = {12},
  pages       = {40},
  note        = {Poster},
  event_place = {Heiligkreuztal, Germany},
  state       = {published},
  abstract    = {In our subjective experience, there is a tight link between covert visual attention and egocentric spatial attention. One key difference is that the latter can extend beyond the visual field, providing us with an accurate mental representation of an object’s location relative to our body position. A neural link between visual and ego-centric spatial attention is suggested by lesions in parietal cortex, that lead not only to deficits in covert visual attention, but frequently also to a disorder of ego-centric spatial awareness, known as hemi-spatial neglect. While parietal involvement in covert visual spatial attention has been much studied, relatively little is known about mental representations of the unseen space around. In the present study we examined whether also unseen spatial locations beyond the visual field are represented in parietal activity, and how they are related to retinotopic representations. We employed a novel virtual reality (VR) paradigm during functional magnetic resonance imaging (fMRI), whereby observers were prompted to draw their spatial attention to the position of one of eight possible objects located around them in an octagonal room. By changing the observers’ facing direction every few trials, the ego-centric location of objects was disentangled from their absolute position and from the objects’ identity. Thus, mental representations of egocentric space surrounding the observer were sampled eight-fold. Decoding results of a multivariate pattern analysis classifier (MVPA), but not univariate results, showed that egocentric spatial directions were specifically represented in parietal cortex. These representations overlapped only partly with visually driven retinotopic activity. Our results thus show that parietal cortex codes not only for retinotopic and visually accessible space, but also for ego-centric locations of the three-dimensional space surrounding us, including unseen space.},
}

@inproceedings{NauKSDB2015,
  author      = {Nau, M. and Korkmaz-Hacialihafiz, D. and Schindler, A. and Darmani, G. and Bartels, A.},
  title       = {Area {V3A} encodes objective motion velocity regardless of eye movement velocity},
  booktitle   = {11th Göttingen Meeting of the German Neuroscience Society, 35th Göttingen Neurobiology Conference},
  year        = {2015},
  month       = mar,
  day         = {18},
  pages       = {45},
  url         = {https://www.nwg-goettingen.de/2015/upload/file/Proceedings_NWG2015.pdf},
  event_place = {Göttingen, Germany},
  state       = {published},
  abstract    = {It is still not very clear how the visual system compensates for self-induced visual motion. This mechanism is crucial to convey visual stability, and also to recognize motion in the external world. There are two possibilities how an object can change its position in your visual field. Either it moves in the outside world or we move our eyes. In both cases its image will move across the retina. The mechanisms enabling us to discriminate between these two options are still not well understood. It is thought that efference copies of eye movement commands are integrated with visual input, allowing to separate self-induced retinal motion from external objective motion. A recent fMRI study showed that area V3A encodes visual motion almost exclusively in world-centered (objective) coordinates, while being almost unresponsive to retinal motion per se. Conversely, the human motion complex (V5/MT and MST) encoded both, objective and retinal motion with equal strength (Fischer, Bülthoff, Logothetis and Bartels, 2012). In the present study we asked two related questions. First, we asked whether human motion regions differentiate between outside objective motion being faster or slower than eye movements with different speeds (i.e. resulting in either positively or negatively signed retinal motion). Second, do these regions encode retinal and objective motion in absolute units or in units relative to the velocity of eye movements? To answer these questions, we created 2D random dot stimuli that moved either slower or faster than a fixation dot. All velocities were chosen so that we could examine neural responses to slower, matched and faster background motion relative to 0°/s, 2°/s and 3°/s eye movement speed. Moreover, we ran a functional localizer scan for each subject, allowing us to identify areas V5/MT, MST, V3A, V6, and CSv for region of interest (ROI) analyses. In our analysis, we tested each ROI’s response using a separate set of general linear models (GLM). The GLMs incorporated each of the above hypothesized response properties, and F-tests were used to identify which of the competing models accounted for significantly more variance. We found that all regions encoded both, retinal and objective motion in absolute, not in relative units, and that V3A, but not CSv or V5/MT does differentiate between negatively and positively signed retinal motion. These results suggest that motion is encoded in absolute units throughout the visual motion system, and that V3A has indeed a special role among human motion processing regions in that it represents motion signed with respect to eye movement direction.},
}

@inproceedings{SchindlerB2014,
  author      = {Schindler, A. and Bartels, A.},
  title       = {Parietal Representations of Egocentric Space include unseen Locations},
  booktitle   = {56th Conference of Experimental Psychologists (TeaP 2014)},
  year        = {2014},
  month       = apr,
  day         = {2},
  volume      = {56},
  pages       = {229},
  url         = {https://www.teap.de/memory/TeaP_Abstracts_20140219.pdf},
  event_place = {Giessen, Germany},
  state       = {published},
  abstract    = {Our subjective experience links covert visual and egocentric spatial attention seamlessly. However, the latter can extend beyond the visual field, covering all directions relative to our body. Even with closed eyes we can rotate from the computer screen to face the window with little loss of accuracy, and once rotated we are aware of the computer’s updated egocentric position. It appears thus that our egocentric model includes seen and unseen locations. In contrast to visual representations, little is known about unseen egocentric representations in the healthy brain. Parietal cortex appears to be involved in both, because its lesions can lead to deficits in visual attention, but also to a disorder of egocentric spatial awareness, known as hemispatial neglect. In this study, our participants performed a novel egocentric orientation task inside an octagonal room. Once they were familiar with this setup, we exposed our participants to a virtual version of the same paradigm during fMRI recordings. We found egocentric unseen space represented by patterns of voxel activity in parietal cortex, independent of visual information. Intriguingly, the best decoding performances corresponded to brain areas associated with visual covert attention and reaching, as well as to lesion sites associated with spatial neglect.},
}