@Poster{ DanyeliASJSW2017, title = {Effects of Neurexan® on brain responses to deviant stimuli during an auditory oddball task}, journal = {Frontiers in Psychiatry}, year = {2017}, month = {6}, volume = {Conference Abstracts: ISAD LONDON 2017}, abstract = {Introduction: Neurexan®, a medicinal product sold over the counter (OTC), is composed of four plant and mineral ingredients: passionflower, oats, coffee and zinc valerianate. Neurexan® has been investigated in patients with symptoms related to acute stress, nervousness/restlessness, and insomnia. Previous research suggested that Neurexan® attenuates the neuroendocrine stress response in healthy volunteers (Doering et al. 2016). This study further explores the effects of Neurexan® on cognitive performance and attention. It is generally recognized that stress is associated with cognitive impairments. Expecting that Neurexan® reduces the stress level, we hypothesized that the subjects in the placebo group would be more susceptible to distraction compared to the treatment group. Material and Methods: In a randomized, placebo-controlled, double-blind, two-period crossover trial, brain responses of 39 healthy, moderately stressed males were measured during an unattended auditory oddball paradigm via 64-channel electroencephalogram (EEG) after intake of Neurexan® and placebo. The paradigm consisted of 80% standard tones and two types of deviant tones (10% frequency deviant; 10% duration deviant), presented in a pseudo-randomized order. The standard tone was composed of eight equally loud sinusoidal tones (fundamental frequency 330 Hz and seven harmonic partials) and had a duration of 100 ms. The deviants were either 40 ms shorter (duration deviant) or 1.25 semitones higher (frequency deviant). Results: Here we present results on the effect of Neurexan® treatment, compared to placebo, on both the mismatch negativity (MMN) and the peak latencies of EEG responses (ERPs) to deviant tones (frequency and duration deviants). Discussion: Our findings suggest that Neurexan® also leads to subtle primary processing changes in addition to its postulated top-down effects.}, event_name = {ISAD LONDON 2017: Perspectives on Mood and Anxiety Disorders: Looking to the future}, event_place = {London, UK}, state = {published}, DOI = {10.3389/conf.fpsyt.2017.48.00008}, author = {Danyeli L; Alizadeh S; Surova G; Jamalabadi H; Schultz M; Walter M{mwalter}} } @Proceedings{ ChuangGKS2017, title = {Ambient Notification Environments}, year = {2017}, month = {4}, pages = {-}, web_url = {https://www.dagstuhl.de/de/programm/kalender/semhp/?semnr=17161}, publisher = {Leibniz-Zentrum für Informatik}, address = {Schloss Dagstuhl, Germany}, series = {Dagstuhl Reports}, event_name = {Dagstuhl Seminar 17161}, event_place = {Schloss Dagstuhl, Germany}, state = {accepted}, ISBN = {-}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Gehring S; Kay J; Schmidt A} } @Conference{ Chuang2015_3, title = {Beyond Steering in Human-Centered Closed-Loop Control}, year = {2015}, month = {11}, day = {5}, abstract = {Machines provide us with the capacity to achieve goals beyond our physical limitations. For example, automobiles and aircraft extend our physical mobility, allowing us to travel vast distances in far less time than it would otherwise take. It is truly remarkable that our natural perceptual and motor capabilities are able to adapt, with sufficient training, to the unnatural demands posed by vehicle handling.
While much progress has been achieved in formalizing the control relationship between the human operator and the controlled vehicle, considerably less is understood with regard to how human cognition influences this control relationship. Such an understanding is particularly important given the increasing prevalence of autonomous vehicular control, which stands to radically modify the responsibility of the human operator from one of control to supervision. In this talk, I will first explain how the limitations of a classical cybernetics approach can reveal the necessity of understanding high-level cognition during control, such as anticipation and expertise. Next, I will present our research that relies on unobtrusive measurement techniques (i.e., gaze-tracking, EEG/ERP) to understand how human operators seek out and process relevant information whilst steering. Examples from my lab will be used to demonstrate how such findings can effectively contribute to the development of human-centered technology in the steering domain, such as with the use of warning cues and shared control. Finally, I will briefly present some efforts in modeling an augmented aerial vehicle (e.g., civil helicopters), with the goal of making flying a rotorcraft as easy as driving (www.mycopter.eu).}, web_url = {http://inc.ucsd.edu/inc_chalk.html}, event_name = {Institute for Neural Computation: INC Chalk Talk Series}, event_place = {San Diego, CA, USA}, state = {published}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ StanglMPSBW2015, title = {Triggers of entorhinal grid cell and hippocampal place cell remapping in humans}, year = {2015}, month = {10}, day = {19}, volume = {45}, number = {437.04}, abstract = {Navigating the environment requires the integration of distance, direction, and place information, which critically depends on hippocampal place and entorhinal grid cells. Studies in rodents have shown, however, that substantial changes in the environment’s surroundings can trigger a change in the set of active place cells, accompanied by a rotation of the grid cell firing pattern (Fyhn et al., 2007) - a phenomenon commonly referred to as global remapping. In the present study, we investigated whether human grid and place cells show a similar remapping behavior in response to environmental changes and whether different episodes in the same environment might cause remapping as well. In two experiments, participants underwent 3T fMRI scanning while they navigated a virtual environment, comprising two different rooms in which objects were placed in random locations. Participants explored the first room and learned these object-location conjunctions (learning-phase), after which the objects disappeared and participants were asked to navigate repeatedly to the different object locations (test-phase). This procedure (i.e. a learning- and test-phase within a room) was repeated several times, separated by different events, such as leaving and re-entering the same room, or moving to the second, different room. Indicators of grid cell firing were derived from the BOLD activation while participants moved within the virtual environment, whereas indicators of place cell firing were derived from the activation patterns while participants were standing at particular object locations. We compared these indicators between the different rooms and events to investigate how these manipulations influence remapping.
Overall, our findings demonstrate entorhinal grid cell and hippocampal place cell remapping in humans. Furthermore, our results suggest that besides environmental changes, other events (e.g., re-entering the same environment) might also evoke remapping. We conclude that, in humans, remapping is not only environment-based but also event-based and might serve as a neural mechanism to create distinct memory traces for episodic memory formation.}, web_url = {http://www.sfn.org/am2015/}, event_name = {45th Annual Meeting of the Society for Neuroscience (Neuroscience 2015)}, event_place = {Chicago, IL, USA}, state = {published}, author = {Stangl M; Meilinger T{meilinger}{Department Human Perception, Cognition and Action}; Pape A-A{antopia}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Wolbers T} } @Poster{ FademrechtBBd2015_2, title = {The spatial extent of action sensitive perceptual channels decrease with visual eccentricity}, year = {2015}, month = {10}, day = {18}, volume = {45}, number = {170.13}, abstract = {Actions often occur in the visual periphery. Here we measured the spatial extent of action sensitive perceptual channels across the visual field using a behavioral action adaptation paradigm. Participants viewed an action (punch or handshake) for a prolonged amount of time (adaptor) and subsequently categorized an ambiguous test action as either 'punch' or 'handshake'. The adaptation effect refers to the biased perception of the test stimulus due to the prolonged viewing of the adaptor and the resulting loss of sensitivity to that stimulus. Therefore, the more a channel responds to a specific stimulus, the higher the adaptation effect for that channel. We measured the size of the adaptation effect as a function of the spatial distance between adaptor and test stimuli in order to determine if actions can be processed in spatially distinct channels. Specifically, we adapted participants at 0° (fixation), 20° and 40° eccentricity in three separate conditions to measure the putative spatial extent of action channels at these positions. In each condition, we measured the size of the adaptation effect at -60°,-40°,-20°, 0°,20°,40°,60° of eccentricity. We fitted Gaussian functions to describe the channel response of each condition and used the full width at half maximum (FWHM) of the Gaussians as a measure of the spatial extent of the action channels. In contrast to previous reports of an increase of midget ganglion cell dendritic field size with eccentricity (Dacey, 1993), our results showed that FWHM decreased with eccentricity (FWHM at 0°: 56°, FWHM at 20°: 29°, FWHM at 40°: 26°). We then asked whether the response of these action sensitive perceptual channels can be used to predict average recognition performance (d') of social actions across the visual field obtained in a previous study (Fademrecht et al. 2014). We used G(x), the summed response of all three channels at eccentricity x, to predict recognition performance at eccentricity x. A simple linear transformation of the summed channel response of the form a+b*G(x) was able to predict 95.5% of the variation in the recognition performance.
Taken together, these results demonstrate that actions can be processed in separate, spatially distinct perceptual channels, that their FWHM decreases with eccentricity, and that their responses can be used to predict action recognition performance in the visual periphery.}, web_url = {http://www.sfn.org/am2015/}, event_name = {45th Annual Meeting of the Society for Neuroscience (Neuroscience 2015)}, event_place = {Chicago, IL, USA}, state = {published}, author = {Fademrecht L{lfademrecht}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Barraclough NE; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}} } @Inproceedings{ ScheerBC2015_3, title = {On the influence of steering on the orienting response}, year = {2015}, month = {10}, pages = {24}, abstract = {The extent to which we experience ‘workload’ whilst steering depends on (i) the availability of the human operator’s (presumably limited) resources and (ii) the demands of the steering task. Typically, an increased demand of the steering task for a specific resource can be inferred from how steering modifies the components of the event-related potential (ERP), which is elicited by the stimuli of a competing task. Recent studies have demonstrated that this approach can continue to be applied even when the stimuli do not require an explicit response. Under certain circumstances, workload levels in the primary task can influence the ERPs that are elicited by task-irrelevant target events, in particular complex environmental sounds. Using this approach, the current study assesses the human operator’s resources that are demanded by different aspects of the steering task. To enable future studies to focus their analysis, we identify ERP components and electrodes that are relevant to steering demands, using mass univariate analysis. Additionally, we compare the effectiveness of sound stimuli that are conventionally employed to elicit ERPs for assessing workload, namely pure-tone oddballs and environmental sounds. In the current experiment, participants performed a compensatory tracking task that required them to align a continuously perturbed target line to a stationary reference line. Task difficulty was manipulated either by varying the bandwidth of the disturbance or by varying the complexity of the controller dynamics of the steering system. Both manipulations presented two levels of difficulty (‘Easy’ and ‘Hard’), which could be contrasted to a baseline ‘View only’ condition. During the steering task, task-irrelevant sounds were presented to elicit ERPs: frequent pure-tone standards, rare pure-tone oddballs and rare environmental sounds. Our results show that steering task demands influence ERP components that are suggested by previous literature to be related to the following cognitive processes, namely the call for orientation (i.e., early P3a), the orientation of attention (i.e., late P3a), and the semantic processing of the task-irrelevant sound stimuli (i.e., N400). The early P3 was decreased at frontocentral electrodes, the late P3 centrally, and the N400 centrally and over the left hemisphere. Single subject analyses on these identified components reveal differences that correspond to our manipulations of steering difficulty. More participants showed discrimination in the above components in the ‘Hard’ relative to the ‘Easy’ condition.
The current study identifies the spatial and temporal distribution of ERPs that ought to be targeted for future investigations of the influence of steering on workload. In addition, the use of task-irrelevant environmental sounds to elicit ERP indices for workload holds several advantages over conventional beep tones, especially in the operational context. Finally, the current findings indicate the involvement of cognitive processes in steering, which is typically viewed as being a predominantly visuo-motor task.}, web_url = {http://www.ipa.tu-berlin.de/bwmms/11_berliner_werkstatt_mms/}, editor = {Wienrich, C. , T. Zander, K. Gramann}, publisher = {Universitätsverlag der TU Berlin}, address = {Berlin, Germany}, booktitle = {Trends in Neuroergonomics}, event_name = {11. Berliner Werkstatt Mensch-Maschine-Systeme}, event_place = {Berlin, Germany}, state = {published}, ISBN = {978-3-7983-2803-7}, DOI = {10.14279/depositonce-4887}, author = {Scheer M{mscheer}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ Chuang2015_2, title = {Non-obtrusive measurements of attention and workload in steering}, year = {2015}, month = {9}, day = {16}, web_url = {http://dsc2015.tuebingen.mpg.de/Program.html}, event_name = {DSC 2015 Europe: Driving Simulation Conference & Exhibition}, event_place = {Tübingen, Germany}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ GlatzBC2015_3, title = {Attention Enhancement During Steering Through Auditory Warning Signals}, year = {2015}, month = {9}, day = {1}, pages = {1-5}, abstract = {Modern cars now integrate advanced driving assistance systems, which range up to fully automated driving modes. Since fully automated driving modes have not come into everyday practice yet, operators are currently making use of assistance systems. While the driver is still in control of the vehicle, alerts signal possible collision dangers when, for example, parking. Such warnings are necessary because humans have limited resources: a critical event can go unnoticed simply because attention was focused elsewhere. This raises the question: What is an effective alert in a steering environment? Auditory warning signals have been shown to efficiently direct attention. In the context of traffic, they can prevent collisions by heightening the driver's situational awareness of potential accidents.}, web_url = {http://www.auto-ui.org/15/workshops.php}, event_name = {Workshop on Adaptive Ambient In-Vehicle Displays and Interactions In conjunction with AutomotiveUI 2015 (WAADI'15)}, event_place = {Nottingham, UK}, state = {published}, ISBN = {978-1-4503-3736-6}, author = {Glatz C{cglatz}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ ChuangB2015, title = {Towards a Better Understanding of Gaze Behavior in the Automobile}, year = {2015}, month = {9}, day = {1}, pages = {1-4}, abstract = {Gaze-tracking technology is used increasingly to determine how and which information is accessed and processed in a given interface environment, such as in-vehicle information systems in automobiles.
Typically, fixations on regions of interest (e.g., windshield, GPS) are treated as an indication that the underlying information has been attended to and is, thus, vital to the task. Therefore, decisions such as optimal instrument placement are often made on the basis of the distribution of recorded fixations. In this paper, we briefly introduce gaze-tracking methods for in-vehicle monitoring, followed by a discussion on the relationship between gaze and user-attention. We posit that gaze-tracking data can yield stronger insights into the utility of novel regions-of-interest if they are considered in terms of their deviation from basic gaze patterns. In addition, we suggest how EEG recordings could complement gaze-tracking data and raise outstanding challenges in their implementation. It is contended that gaze-tracking is a powerful tool for understanding how visual information is processed in a given environment, provided it is understood in the context of a model that first specifies the task that has to be carried out.}, web_url = {http://www.auto-ui.org/15/p/workshops/2/8_Towards%20a%20Better%20Understanding%20of%20Gaze%20Behavior%20in%20the%20Automobile_Chuang.pdf}, event_name = {Workshop on Practical Experiences in Measuring and Modeling Drivers and Driver-Vehicle Interactions In conjunction with AutomotiveUI 2015}, event_place = {Nottingham, UK}, state = {published}, ISBN = {978-1-4503-3736-6}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ LockenBMCSAM2015, title = {Workshop on Adaptive Ambient In-Vehicle Displays and Interactions}, year = {2015}, month = {9}, day = {1}, pages = {1-4}, abstract = {Informing a driver of the vehicle’s changing state and environment is a major challenge that grows with the introduction of automated in-vehicle assistant and infotainment systems. Poorly designed systems could compete for the driver’s attention, drawing it away from the primary driving task. Thus, such systems should communicate information in a way that conveys its relevant urgency. While some information is unimportant and should never distract a driver from important tasks, there are also calls for action, which a driver should not be able to ignore. We believe that adaptive ambient displays and peripheral interaction could serve to unobtrusively present information while switching the driver’s attention when needed. This workshop will focus on promoting an exchange of best known methods by discussing challenges and potentials for this kind of interaction in today’s scenarios as well as in future mixed or fully autonomous traffic.
The central objective of this workshop is to bring together researchers from different domains and discuss innovative and engaging ideas and a future landscape for research in this area.}, web_url = {http://www.auto-ui.org/15/p/workshopproposals/WAADI.pdf}, event_name = {Workshop on Adaptive Ambient In-Vehicle Displays and Interactions In conjunction with AutomotiveUI 2015 (WAADI'15)}, event_place = {Nottingham, UK}, state = {published}, author = {L\"ocken A; Borojeni SS; M\"uller H; Chuang L{chuang}{Department Human Perception, Cognition and Action}; Schroeter R; Alvarez I; Meijering V} } @Inproceedings{ RienerAJCJPC2015, title = {Workshop on Practical Experiences in Measuring and Modeling Drivers and Driver-Vehicle Interactions}, year = {2015}, month = {9}, day = {1}, pages = {1-4}, web_url = {http://www.auto-ui.org/15/workshops.php}, event_name = {Workshop on Practical Experiences in Measuring and Modeling Drivers and Driver-Vehicle Interactions In conjunction with AutomotiveUI 2015}, event_place = {Nottingham, UK}, state = {published}, ISBN = {978-1-4503-3736-6}, author = {Riener A; Alvarez I; Jeon MP; Chuang L{chuang}{Department Human Perception, Cognition and Action}; Ju W; Pfleging B; Chiesa M} } @Article{ BiegCBB2015, title = {Asymmetric saccade reaction times to smooth pursuit}, journal = {Experimental Brain Research}, year = {2015}, month = {9}, volume = {233}, number = {9}, pages = {2527-2538}, abstract = {Before initiating a saccade to a moving target, the brain must take into account the target’s eccentricity as well as its movement direction and speed. We tested how the kinematic characteristics of the target influence the time course of this oculomotor response. Participants performed a step-ramp task in which the target object stepped from a central to an eccentric position and moved at constant velocity either to the fixation position (foveopetal) or further to the periphery (foveofugal). The step size and target speed were varied. Of particular interest were trials that exhibited an initial saccade prior to a smooth pursuit eye movement. Measured saccade reaction times were longer in the foveopetal than in the foveofugal condition. In the foveopetal (but not the foveofugal) condition, the occurrence of an initial saccade, its reaction time as well as the strength of the pre-saccadic pursuit response depended on both the target’s speed and the step size. A common explanation for these results may be found in the neural mechanisms that select between oculomotor response alternatives, i.e., a saccadic or smooth response.}, web_url = {http://link.springer.com/content/pdf/10.1007%2Fs00221-015-4323-8.pdf}, state = {published}, DOI = {10.1007/s00221-015-4323-8}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action}} } @Inproceedings{ ScheerBC2015_2, title = {On the Cognitive Demands of Different Controller Dynamics: A within-subject P300 Analysis}, year = {2015}, month = {9}, pages = {1042-1046}, abstract = {The cognitive workload of a steering task could reflect its demand on attentional as well as working memory resources under different conditions.
These respective demands could be differentiated by evaluating components of the event-related potential (ERP) response to different types of stimulus probes, which are claimed to reflect the availability of either attention (i.e., novelty-P3) or working memory (i.e., target-P3) resources. Here, a within-subject analysis is employed to evaluate the robustness of ERP measurements in discriminating the cognitive demands of different steering conditions. We find that the amplitude of novelty-P3 ERPs to task-irrelevant environmental sounds is diminished when participants are required to perform a steering task. This indicates that steering places a demand on attentional resources. In addition, target-P3 ERPs to a secondary auditory detection task vary when the controller dynamics in the steering task are manipulated. This indicates that differences in controller dynamics vary in their working memory demands.}, web_url = {http://pro.sagepub.com/content/59/1/1042.full.pdf+html}, publisher = {Sage}, address = {London, UK}, event_name = {Human Factors and Ergonomics Society Annual Meeting (HFES 2015)}, event_place = {Los Angeles, CA, USA}, state = {published}, DOI = {10.1177/1541931215591294}, author = {Scheer M{mscheer}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ GlatzBC2015_2, title = {Warning Signals With Rising Profiles Increase Arousal}, year = {2015}, month = {9}, pages = {1011}, abstract = {Auditory warnings are often used to direct a user’s attention from a primary task to critical peripheral events. In the context of traffic, in-vehicle collision avoidance systems could, for example, employ spatially relevant sounds to alert the driver to the possible presence of a crossing pedestrian. This raises the question: What is an effective auditory alert in a steering environment? Ideally, such warning signals should not only arouse the driver but also result in deeper processing of the event that the driver is being alerted to. Warning signals can be designed to convey the time to contact with an approaching object (Gray, 2011). That is, sounds can rise in intensity in accordance with the physical velocity of an approaching threat. The current experiment was a manual steering task in which participants were occasionally required to recognize peripheral visual targets. These visual targets were sometimes preceded by a spatially congruent auditory warning signal. This was either a sound with constant intensity, linearly rising intensity, or non-linearly rising intensity that conveyed time-to-contact. To study the influence of warning cues on the arousal state, different features of electroencephalography (EEG) were measured. Alpha frequency, which ranges from 7.5 to 12.5 Hz, is believed to represent different cognitive processes, in particular arousal (Klimesch, 1999). That is, greater desynchronization in the alpha frequency reflects higher levels of attention as well as alertness. Our results showed a significant decrease in alpha power for sounds with rising intensity profiles, indicating increased alertness and expectancy for an event to occur. To analyze whether the increased arousal for rising sounds resulted in deeper processing of the visual target, we analyzed the event-related potential P3.
It is a positive component that occurs approximately 300 ms after an event and is known to be associated with recognition performance of a stimulus (Parasuraman & Beatty, 1980). In other words, smaller P3 amplitudes indicate worse identification than larger amplitudes. Our results show that sounds with time-to-contact properties induced larger P3 responses to the targets that they cued compared to targets cued by constant or linearly rising sounds. This suggests that rising sounds with time-to-contact intensity profiles evoke deeper processing of the visual target and therefore result in better identification than events cued by sounds with linearly rising or constant intensity.}, web_url = {http://pro.sagepub.com/content/59/1/1011.full.pdf+html}, publisher = {Sage}, address = {London, UK}, event_name = {Human Factors and Ergonomics Society Annual Meeting (HFES 2015)}, event_place = {Los Angeles, CA, USA}, state = {published}, DOI = {10.1177/1541931215591402}, author = {Glatz C{cglatz}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ DobsSBG2015, title = {Independent control of cortical representations for expression and identity of dynamic faces}, journal = {Journal of Vision}, year = {2015}, month = {9}, volume = {15}, number = {12}, pages = {684}, abstract = {Humans can easily extract who someone is and what expression they are making from the complex interplay of invariant and changeable visual features of faces. Recent evidence suggests that cortical mechanisms to selectively extract information about these two socially critical cues are segregated. Here we asked if these systems are independently controlled by task demands. We therefore had subjects attend to either identity or expression of the same dynamic face stimuli and examined cortical representations in topographically and functionally localized visual areas using fMRI. Six human subjects performed a task that involved detecting changes in the attended cue (expression or identity) of dynamic face stimuli (8 presentations per trial of 2s movie clips depicting 1 of 2 facial identities expressing happiness or anger) in 18-20 7min scans (20 trials/scan in pseudorandom order) in 2 sessions. Dorsal areas such as hMT and STS were dissociated from more ventral areas such as FFA and OFA by their modulation with task demands and their encoding of exemplars of expression and identity. In particular, dorsal areas showed higher activity during the expression task (hMT: p< 0.05, lSTS: p< 0.01; t-test) where subjects were cued to attend to the changeable aspects of the faces whereas ventral areas showed higher activity during the identity task (lOFA: p< 0.05; lFFA: p< 0.05). Specific exemplars of identity could be reliably decoded (using linear classifiers) from responses of ventral areas (lFFA: p< 0.05; rFFA: p< 0.01; permutation-test). In contradistinction, dorsal area responses could be used to decode specific exemplars of expression (hMT: p< 0.01; rSTS: p< 0.01), but only if expression was attended by subjects. Our data support the notion that identity and expression are processed by segregated cortical areas and that the strength of the representations for particular exemplars is under independent task control.}, web_url = {http://jov.arvojournals.org/article.aspx?articleid=2433792}, event_name = {15th Annual Meeting of the Vision Sciences Society (VSS 2015)}, event_place = {St.
Pete Beach, FL, USA}, state = {published}, DOI = {10.1167/15.12.684}, author = {Dobs K{kdobs}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Gardner JL} } @Poster{ ZhaoB2015, title = {Intrinsic Memorability Predicts Short- and Long-Term Memory of Static and Dynamic Faces}, journal = {Journal of Vision}, year = {2015}, month = {9}, volume = {15}, number = {12}, pages = {698}, abstract = {Does a face itself determine how well it will be recognized? Unlike many previous studies that have linked face recognition performance to individuals’ face processing ability (e.g., holistic processing), the present study investigated whether recognition of natural faces can be predicted by the faces themselves. Specifically, we examined whether short- and long-term recognition memory of both dynamic and static faces can be predicted according to face-based properties. Participants memorized either dynamic (Experiment 1) or static (Experiment 2) natural faces, and recognized them after both short- and long-term retention intervals (three minutes vs. seven days). We found that the intrinsic memorability of individual faces (i.e., the rate of correct recognition across a group of participants) consistently predicted an independent group of participants’ performance in recognizing the same faces, for both static and dynamic faces and for both short- and long-term face recognition memory. This result indicates that intrinsic memorability of faces is bound to face identity rather than image properties. Moreover, we also asked participants to judge subjective memorability of faces they just learned, and to judge whether they were able to recognize the faces in a later test. This result shows that participants can extract intrinsic face memorability at encoding. Together, these results provide compelling evidence for the hypothesis that intrinsic face memorability predicts natural face recognition, highlighting that face recognition performance is not only a function of individuals’ face processing ability, but is also determined by intrinsic properties of faces.}, web_url = {http://jov.arvojournals.org/article.aspx?articleid=2433806}, event_name = {15th Annual Meeting of the Vision Sciences Society (VSS 2015)}, event_place = {St. Pete Beach, FL, USA}, state = {published}, DOI = {10.1167/15.12.698}, author = {Zhao M{mzhao}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ FademrechtBd2015, title = {Recognition of static and dynamic social actions in the visual periphery}, journal = {Journal of Vision}, year = {2015}, month = {9}, volume = {15}, number = {12}, pages = {494}, abstract = {Although actions often appear in the visual periphery, little is known about action recognition outside of the fovea. Our previous results have shown that action recognition of moving life-size human stick figures is surprisingly accurate even in far periphery and declines non-linearly with eccentricity.
Here, our aim was (1) to investigate the influence of motion information on action recognition in the periphery by comparing recognition of static and dynamic stimuli and (2) to assess whether the observed non-linearity in our previous study was caused by the presence of motion because a linear decline of recognition performance with increasing eccentricity was reported with static presentations of objects and animals (Jebara et al. 2009; Thorpe et al. 2001). In our study, 16 participants saw life-size stick figure avatars that carried out six different social actions (three different greetings and three different aggressive actions). The avatars were shown dynamically and statically on a large screen at different positions in the visual field. In a 2AFC paradigm, participants performed 3 tasks with all actions: (a) they assessed their emotional valence; (b) they categorized each of them as a greeting or an attack; and (c) they identified each of the six actions. (1) We found better recognition performance for dynamic stimuli at all eccentricities. Thus, motion information helps recognition in the fovea as well as in the far periphery. (2) We observed a non-linear decrease of recognition performance for both static and dynamic stimuli. Power law functions with exponents of 3.4 and 2.9 described the non-linearity observed for dynamic and static actions, respectively. These non-linear functions describe the data significantly better (p=.002) than linear functions and suggest that human actions are processed differently from objects or animals.}, web_url = {http://jov.arvojournals.org/article.aspx?articleid=2433602}, event_name = {15th Annual Meeting of the Vision Sciences Society (VSS 2015)}, event_place = {St. Pete Beach, FL, USA}, state = {published}, DOI = {10.1167/15.12.494}, author = {Fademrecht L{lfademrecht}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}} } @Poster{ BulthoffZ2015, title = {What Type of Facial Information Underlies Holistic Face Processing?}, journal = {Journal of Vision}, year = {2015}, month = {9}, volume = {15}, number = {12}, pages = {145}, abstract = {Holistic face processing is often referred to as the inability to selectively attend to parts of faces without interference from irrelevant facial parts. While extensive research seeks the origin of holistic face processing in perceiver-based properties (e.g., expertise), the present study aimed to pinpoint face-based visual information that may support this hallmark indicator of face processing. Specifically, we used the composite face task, a standard task of holistic processing, to investigate whether facial surface information (e.g., texture) or facial shape information underlies holistic face processing, since both sources of information have been shown to support face recognition. In Experiment 1, participants performed two composite face tasks, one for normal faces (i.e., shape + surface information) and one for shape-only faces (i.e., without facial surface information). We found that facial shape information alone is sufficient to elicit holistic processing as strongly as normal faces, indicating that facial surface information is not necessary for holistic processing. In Experiment 2, we tested whether facial surface information alone is sufficient to observe holistic face processing.
We chose to control facial shape information instead of removing it, by having all test faces share exactly the same facial shape while exhibiting different facial surface information. Participants performed two composite face tasks, one for normal faces and one for same-shape faces. We found a composite face effect in normal faces but not in same-shape faces, indicating that holistic processing is mediated predominantly by facial shape rather than surface information. Together, these results indicate that facial shape, but not surface information, underlies holistic face processing.}, web_url = {http://jov.arvojournals.org/article.aspx?articleid=2433183}, event_name = {15th Annual Meeting of the Vision Sciences Society (VSS 2015)}, event_place = {St. Pete Beach, FL, USA}, state = {published}, DOI = {10.1167/15.12.145}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Zhao M{mzhao}{Department Human Perception, Cognition and Action}} } @Conference{ FladBC2015_2, title = {Towards studying the influence of information channel properties on visual scanning processes}, year = {2015}, month = {8}, day = {17}, pages = {8}, web_url = {http://summerschool.igd-r.fraunhofer.de/summer_school_program_booklet.pdf}, event_name = {International Summer School on Visual Computing (VCSS 2015)}, event_place = {Rostock, Germany}, state = {published}, author = {Flad N{nflad}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ FladBC2015_3, title = {Combined use of eye-tracking and EEG to understand visual information processing}, year = {2015}, month = {8}, pages = {115-124}, web_url = {http://summerschool.igd-r.fraunhofer.de/index2.php}, editor = {Schulz, H.-J. , B. Urban, U. von Lukas}, publisher = {Fraunhofer Verlag}, address = {Stuttgart, Germany}, event_name = {International Summer School on Visual Computing (VCSS 2015)}, event_place = {Rostock, Germany}, state = {published}, ISBN = {978-3-8396-0960-6}, author = {Flad N{nflad}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ BulthoffMT2015, title = {Active and passive exploration of faces}, journal = {Perception}, year = {2015}, month = {8}, volume = {44}, number = {ECVP Abstract Supplement}, pages = {51}, abstract = {In most face recognition studies, learned faces are shown without a visible body to passive participants. Here, faces were attached to a body and participants viewed them either actively or passively before their recognition performance was tested. 3D-laser scans of real faces were integrated onto sitting or standing full-bodied avatars placed in a virtual room. In the ‘active’ learning condition, participants viewed the virtual environment through a head-mounted display. Their head position was tracked to allow them to walk physically from one avatar to the next and to move their heads to look up or down at the standing or sitting avatars. In the ‘passive dynamic’ condition, participants saw a rendering of the visual explorations of the first group. In the ‘passive static’ condition, participants saw static screenshots of the upper bodies in the room. Face orientation congruency (up versus down) was manipulated at test.
In all learning conditions, faces were recognized more accurately when viewed in a familiar orientation. While active viewing in general improved performance as compared to viewing static faces, passive observers and active observers - who received the same visual information - performed similarly, despite the absence of volitional movements for the passive dynamic observers.}, web_url = {http://pec.sagepub.com/content/44/1_suppl.toc}, event_name = {38th European Conference on Visual Perception (ECVP 2015)}, event_place = {Liverpool, UK}, state = {published}, DOI = {10.1177/0301006615598674}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}} } @Poster{ FademrechtBBd2015, title = {Seeing actions in the fovea influences subsequent action recognition in the periphery}, journal = {Perception}, year = {2015}, month = {8}, volume = {44}, number = {ECVP Abstract Supplement}, pages = {214}, abstract = {Although actions often appear in the visual periphery, little is known about action recognition away from fixation. We showed in previous studies that action recognition of moving stick-figures is surprisingly good in peripheral vision even at 75° eccentricity. Furthermore, there was no decline of performance up to 45° eccentricity. This finding could be explained by action sensitive units in the fovea also sampling action information from the periphery. To investigate this possibility, we assessed the horizontal extent of the spatial sampling area (SSA) of action sensitive units in the fovea by using an action adaptation paradigm. Fifteen participants, adapted to an action (handshake or punch) at the fovea, were tested with an ambiguous action stimulus at 0°, 20°, 40° and 60° eccentricity left and right of fixation. We used a large screen display to cover the whole horizontal visual field of view. An adaptation effect was present in the periphery up to 20° eccentricity (p<0.001), suggesting a large SSA of action sensitive units representing foveal space. Hence, action recognition in the visual periphery might benefit from a large SSA of foveal units.}, web_url = {http://pec.sagepub.com/content/44/1_suppl.toc}, event_name = {38th European Conference on Visual Perception (ECVP 2015)}, event_place = {Liverpool, UK}, state = {published}, DOI = {10.1177/0301006615598674}, author = {Fademrecht L{lfademrecht}{Department Human Perception, Cognition and Action}; Barraclough NE; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}} } @Inproceedings{ Chuang2015, title = {Error Visualization and Information-Seeking Behavior for Air-Vehicle Control}, year = {2015}, month = {7}, pages = {3-11}, abstract = {A control schema for a human-machine system allows the human operator to be integrated as a mathematical description in a closed-loop control system, i.e., a pilot in an aircraft. Such an approach typically assumes that error feedback is perfectly communicated to the pilot who is responsible for tracking a single flight variable. However, this is unlikely to be true in a flight simulator or a real flight environment.
This paper discusses different aspects that pertain to error visualization and the pilot’s ability to seek out relevant information across a range of flight variables.}, web_url = {http://link.springer.com/content/pdf/10.1007%2F978-3-319-20816-9_1.pdf}, editor = {Schmorrow, D.D. , C.M. Fidopiastis}, publisher = {Springer International Publishing}, address = {Cham, Switzerland}, series = {Lecture Notes in Artificial Intelligence ; 9183}, booktitle = {Foundations of Augmented Cognition}, event_name = {9th International Conference on Augmented Cognition (AC 2015), held as part of HCI International 2015}, event_place = {Los Angeles, CA, USA}, state = {published}, ISBN = {978-3-319-20815-2}, DOI = {10.1007/978-3-319-20816-9_1}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Article{ KimSWLB2015, title = {Abstract Representations of Associated Emotions in the Human Brain}, journal = {Journal of Neuroscience}, year = {2015}, month = {4}, volume = {35}, number = {14}, pages = {5655-5663}, abstract = {Emotions can be aroused by various kinds of stimulus modalities. Recent neuroimaging studies indicate that several brain regions represent emotions at an abstract level, i.e., independently from the sensory cues from which they are perceived (e.g., face, body, or voice stimuli). If emotions are indeed represented at such an abstract level, then these abstract representations should also be activated by the memory of an emotional event. We tested this hypothesis by asking human participants to learn associations between emotional stimuli (videos of faces or bodies) and non-emotional stimuli (fractals). After successful learning, fMRI signals were recorded during the presentations of emotional stimuli and emotion-associated fractals. We tested whether emotions could be decoded from fMRI signals evoked by the fractal stimuli using a classifier trained on the responses to the emotional stimuli (and vice versa). This was implemented as a whole-brain searchlight, multivoxel activation pattern analysis, which revealed successful emotion decoding in four brain regions: posterior cingulate cortex (PCC), precuneus, MPFC, and angular gyrus. The same analysis run only on responses to emotional stimuli revealed clusters in PCC, precuneus, and MPFC. Multidimensional scaling analysis of the activation patterns revealed clear clustering of responses by emotion across stimulus types.
Our results suggest that PCC, precuneus, and MPFC contain representations of emotions that can be evoked by stimuli that carry emotional information themselves or by stimuli that evoke memories of emotional stimuli, while the angular gyrus is more likely to take part in emotional memory retrieval.}, web_url = {http://www.jneurosci.org/content/35/14/5655.full.pdf+html}, state = {published}, DOI = {10.1523/JNEUROSCI.4059-14.2015}, author = {Kim J{junsukkim}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}; Rohe T{trohe}{Department Human Perception, Cognition and Action}{Research Group Cognitive Neuroimaging}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; Lee S-W; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ BulthoffN2015, title = {Distinctive voices enhance the visual recognition of unfamiliar faces}, journal = {Cognition}, year = {2015}, month = {4}, volume = {137}, pages = {9–21}, abstract = {Several studies have provided evidence in favour of a norm-based representation of faces in memory. However, such models have hitherto failed to take account of how other person-relevant information affects face recognition performance. Here we investigated whether distinctive or typical auditory stimuli affect the subsequent recognition of previously unfamiliar faces and whether the type of auditory stimulus matters. In this study participants learned to associate either unfamiliar distinctive and typical voices or unfamiliar distinctive and typical sounds to unfamiliar faces. The results indicated that recognition performance was better for faces previously paired with distinctive than with typical voices, but we failed to find any benefit on face recognition when the faces were previously associated with distinctive sounds. These findings possibly point to an expertise effect, as faces are usually associated with voices. More importantly, they suggest that memory for visual faces can be modified by the perceptual quality of related vocal information and more specifically that facial distinctiveness can be of a multi-sensory nature. These results have important implications for our understanding of the structure of memory for person identification.}, web_url = {http://www.sciencedirect.com/science/article/pii/S0010027714002868}, state = {published}, DOI = {10.1016/j.cognition.2014.12.006}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell FN{fiona}{Department Human Perception, Cognition and Action}} } @Poster{ ZhaoB2015_2, title = {Memory of Own- and Other-Race Faces: Influences of Encoding and Retention Processes}, year = {2015}, month = {3}, day = {12}, pages = {32}, abstract = {We demonstrate that both encoding and memory processes affect recognition of own- and other-race faces differently. Static own-race faces are better recognized than static other-race faces but this other-race effect is not found for rigidly moving faces.
Further, this effect is larger in short-term memory than in long-term memory.}, web_url = {http://www.psychologicalscience.org/convention/icps_program/pdf/Poster-Session-I.pdf}, event_name = {International Convention of Psychological Science (ICPS 2015)}, event_place = {Amsterdam, The Netherlands}, state = {published}, author = {Zhao M{mzhao}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ SymeonidouOBC2015, title = {Direct haptic feedback benefits control performance during steering}, year = {2015}, month = {3}, day = {10}, pages = {249-250}, abstract = {Haptic feedback can be introduced in control devices to improve steering performance, such as in driving and flying scenarios. For example, direct haptic feedback (DHF) can be employed to guide the operator towards an optimal trajectory. It remains unclear how DHF magnitude could interact with user performance. A weak DHF might not be perceptible to the user, while a large DHF could result in overreliance. To assess the influence of DHF, five naive participants performed a compensatory tracking task across different DHF magnitudes. During the task, participants were seated in front of an artificial horizon display and were asked to compensate for externally induced disturbances in the roll dimension by manipulating a control joystick. Our results indicate that haptic feedback benefits steering performance across all tested DHF levels. This benefit increases linearly with increasing DHF magnitude. Interestingly, shared control performance was always inferior to the same DHF system without human input. This could be due to involuntary resistance that results from the arm dynamics.}, web_url = {https://www.teap.de/memory/TeaP_2015_Program2015-03-13.pdf}, event_name = {57th Conference of Experimental Psychologists (TeaP 2015)}, event_place = {Hildesheim, Germany}, state = {published}, author = {Symeonidou E-R{esymeonidou}; Olivari M{molivari}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ FladBC2015, title = {Simultaneous EEG and eye-movement recording in a visual scanning task}, year = {2015}, month = {3}, day = {10}, pages = {81}, abstract = {Eye-movements can result in large artifacts in the EEG signal that could potentially obscure weaker cortically-based signals. Therefore, EEG studies are typically designed to minimize eye-movements [although see Plöchl et al., 2012; Dimigen et al., 2011]. We present methods for simultaneous EEG and eye-tracking recordings in a visual scanning task. Participants were required to serially attend to four areas-of-interest to detect a visual target. We compare EEG results, which were recorded either in the presence or absence of natural eye-movements. Furthermore, we demonstrate how natural eye-movement fixations can be reconstructed from the EOG signal, in a way that is comparable to the input from a simultaneous video-based eye-tracker. Based on these fixations, we address how EEG data can be segmented according to eye-movements (as opposed to experimentally timed stimuli). Finally, we explain how eye-movement-induced artifacts can be effectively removed via independent component analysis (ICA), which allows EEG components to be classified as having either a 'cortical' or 'non-cortical' origin.
These methods offer the potential of measuring robust EEG signals even in the presence of natural eye-movements.}, web_url = {https://www.teap.de/memory/TeaP_2015_Program2015-03-13.pdf}, event_name = {57th Conference of Experimental Psychologists (TeaP 2015)}, event_place = {Hildesheim, Germany}, state = {published}, author = {Flad N{nflad}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ GlatzBC2015, title = {Sounds with time-to-contact properties are processed preferentially}, year = {2015}, month = {3}, day = {10}, pages = {93}, abstract = {Sounds with rising intensities are known to be more salient than their constant amplitude counterparts [Seifritz et al., 2002]. Incorporating a time-to-contact characteristic into the rising profile can further increase their perceived saliency [Gray, 2011]. We investigated whether looming sounds with this time-to-contact profile might be especially effective as warning signals. Nine volunteers performed a primary steering task whilst occasionally discriminating oriented Gabor patches that were presented in their visual periphery. These visual stimuli could be preceded by an auditory warning cue, 1 second before they appeared. The 2000 Hz tone could have an intensity profile that was either constant (65 dB), linearly rising (60 - 75 dB, ramped tone), or exponentially increasing (looming tone). Overall, warning cues resulted in significantly faster and more sensitive detections of the visual targets. More importantly, we found that EEG potentials to the looming tone were significantly earlier and sustained for longer, compared to both the constant and ramped tones. This suggests that looming sounds are processed preferentially because of time-to-contact cues rather than rising intensity alone.}, web_url = {https://www.teap.de/memory/TeaP_2015_Program2015-03-13.pdf}, event_name = {57th Conference of Experimental Psychologists (TeaP 2015)}, event_place = {Hildesheim, Germany}, state = {published}, author = {Glatz C{cglatz}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ ScheerBC2015, title = {Measuring workload during steering: A novelty-P3 study}, year = {2015}, month = {3}, day = {9}, pages = {220}, abstract = {The workload of a given task, such as steering, can be defined as the demand that it places on the limited attentional and cognitive resources of a driver. Given this, an increase in workload should reduce the amount of resources that are available for other tasks. For example, increasing workload in a primary steering task can decrease attention to oddball targets in a secondary auditory detection task. This can diminish the amplitude of its event-related potential (i.e., P3; Wickens et al., 1984). Here, we present a novel approach that does not require the participant to perform a secondary task. During steering, participants experienced a three-stimulus oddball paradigm, where pure tones were intermixed with infrequently presented, unexpected environmental sounds (e.g., cat meowing). Such sounds are known to elicit a subcomponent of the P3, namely novelty-P3. Novelty-P3 reflects a passive shift of attention, which also applies to task-irrelevant events, thus removing the need for a secondary task (Ullsperger et al., 2001).
We found that performing a manual steering task attenuated the amplitude of the novelty-P3, elicited by task-irrelevant novel sounds. The presented paradigm could be a viable approach to estimate workload in real-world scenarios.}, web_url = {https://www.teap.de/memory/TeaP_2015_Program2015-03-13.pdf}, event_name = {57th Conference of Experimental Psychologists (TeaP 2015)}, event_place = {Hildesheim, Germany}, state = {published}, author = {Scheer M{mscheer}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ ChuangNWB2015, title = {Learning anticipatory eye-movements for control}, year = {2015}, month = {3}, day = {9}, pages = {58}, abstract = {Anticipatory eye-movements (or look-ahead fixations) are often observed in complex closed-loop control tasks, such as steering a vehicle on a non-straight path (Land & Lee, 1994). This eye-movement behavior allows the observer to switch between different visual cues that are relevant for minimizing present and future control errors (Wilkie, Wann, & Allison, 2008). Here, we asked: Are anticipatory eye-movements generic or are they acquired according to the learning environment? We trained and tested 27 participants on a control system, which simulated the simplified dynamics of a rotorcraft. Participants had to translate laterally along a specified path while maintaining a fixed altitude. Ground and vertical landmarks provided respective visual cues. Training took place under one of three possible field-of-view conditions (height x width: 60° x 60°; 60° x 180°; 125° x 180°), while testing took place in an unrestricted field-of-view environment (125° x 230°). We found that restricting the field-of-view during training significantly decreases the number of anticipatory eye-movements during testing. This effect can be largely attributed to the size of the horizontal field-of-view. Our finding suggests that anticipatory eye-movements for closed-loop control are shaped by the conditions of the training environment.}, web_url = {https://www.teap.de/memory/TeaP_2015_Program2015-03-13.pdf}, event_name = {57th Conference of Experimental Psychologists (TeaP 2015)}, event_place = {Hildesheim, Germany}, state = {published}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Nieuwenhuizen FM{fmnieuwenhuizen}{Department Human Perception, Cognition and Action}; Walter J{jwalter}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Book{ Dobs2015, title = {Behavioral and Neural Mechanisms Underlying Dynamic Face Perception}, year = {2015}, pages = {108}, abstract = {Dynamic faces are highly complex, ecologically and socially relevant stimuli which we encounter almost every day. When and what we extract from this rich source of information needs to be well coordinated by the face perception system. The current thesis investigates how this coordination is achieved. Part I comprises two psychophysical experiments examining the mechanisms underlying facial motion processing. Facial motion is represented as high-dimensional spatio-temporal data defining which part of the face is moving in which direction over time. Previous studies suggest that facial motion can be adequately represented using simple approximations.
I argue against the use of synthetic facial motion by showing that the face perception system is highly sensitive to manipulations of the natural spatio-temporal characteristics of facial motion. The neural processes coordinating facial motion processing may rely on two mechanisms: first, a sparse but meaningful spatio-temporal code representing facial motion; second, a mechanism that extracts distinctive motion characteristics. Evidence for the latter hypothesis is provided by the observation that facial motion, when performed in unconstrained contexts, helps identity judgments. Part II presents a functional magnetic resonance imaging (fMRI) study investigating the neural processing of expression and identity information in dynamic faces. Previous studies proposed a distributed neural system for face perception which distinguishes between invariant (e.g., identity) and changeable (e.g., expression) aspects of faces. Attention is a potential candidate mechanism to coordinate the processing of these two facial aspects. Two findings support this hypothesis: first, attention to expression versus identity of dynamic faces dissociates cortical areas assumed to process changeable aspects from those involved in discriminating invariant aspects of faces; second, attention leads to a more precise neural representation of the attended facial feature. Interactions between these two representations may be mediated by a part of the inferior occipital gyrus and the superior temporal sulcus, which is supported by the observation that the latter area represented both expression and identity, while the former represented identity information irrespective of the attended feature.}, web_url = {http://www.logos-verlag.de/cgi-bin/engbuchmid?isbn=3910&lng=deu&id=}, publisher = {Logos Verlag}, address = {Berlin, Germany}, series = {MPI Series in Biological Cybernetics ; 40}, state = {published}, ISBN = {978-3-8325-3910-8}, DOI = {10.15496/publikation-4008}, author = {Dobs K{kdobs}{Department Human Perception, Cognition and Action}} } @Book{ Esins2015, title = {Face processing in congenital prosopagnosia}, year = {2015}, pages = {137}, abstract = {Face recognition is one of the most important abilities for everyday social interactions. Congenital prosopagnosia, also referred to as "face blindness", describes the innate, lifelong impairment in recognizing other people by their face. About 2 % of the population is affected. This thesis aimed to investigate different aspects of face processing in prosopagnosia in order to gain a clearer picture and a better understanding of this heterogeneous impairment. In a first study, various aspects of face recognition and perception were investigated to allow for a better understanding of the nature of prosopagnosia. The results replicated previous findings and helped to resolve discrepancies between former studies. In addition, it was found that prosopagnosics show an irregular response behavior in tests for holistic face recognition. We propose that prosopagnosics either switch between strategies or respond randomly when performing these tests. In a second study, the general face recognition deficit observed in prosopagnosia was compared to face recognition deficits occurring when dealing with other-race faces. Most humans find it hard to recognize faces of an unfamiliar race, a phenomenon called the "other-race effect". 
The study served to investigate whether there is a possible common mechanism underlying prosopagnosia and the other-race effect, as both are characterized by problems in recognizing faces. The results allowed us to reject this hypothesis and yielded new insights about similarities and dissimilarities between prosopagnosia and the other-race effect. In the last study, a possible treatment of prosopagnosia was investigated. This was based on a single case in which a prosopagnosic reported a sudden improvement of her face recognition abilities after she started a special diet. The different studies cover diverse aspects of prosopagnosia: the nature of prosopagnosia and measurement of its characteristics, comparison to other face recognition impairments, and treatment options. The results serve to broaden the knowledge about prosopagnosia and to gain a more detailed picture of this impairment.}, web_url = {http://www.logos-verlag.de/cgi-bin/engbuchmid?isbn=3983&lng=deu&id=}, publisher = {Logos Verlag}, address = {Berlin, Germany}, series = {MPI Series in Biological Cybernetics ; 43}, state = {published}, ISBN = {978-3-8325-3983-2}, DOI = {10.15496/publikation-5519}, author = {Esins J{esins}{Department Human Perception, Cognition and Action}} } @Book{ Kaulard2015, title = {Visual Perception of Emotional and Conversational Facial Expressions}, year = {2015}, pages = {224}, abstract = {One of the defining attributes of the human species is sophisticated communication, for which facial expressions are crucial. Traditional research has so far mainly investigated a minority of 6 basic emotional expressions displayed as pictures. Despite the important insights of this approach, its ecological validity is limited: facial movements express more than emotions, and facial expressions are more than just pictures. The objective of the present thesis is therefore to improve the understanding of facial expression recognition by investigating the internal representations of a large range of facial expressions, displayed both as static pictures and as dynamic videos. To this end, it was necessary to develop and validate a new facial expression database which includes 20,000 stimuli of 55 expressions (study 1). Perceptual representations of the six basic emotional expressions were found previously to rely on evaluation of valence and arousal; study 2 showed that this evaluation generalises to many more expressions, particularly when displayed as videos. While it is widely accepted that knowledge influences perception, how these are linked is largely unknown; study 3 investigated this question by asking how knowledge about facial expressions, instantiated as conceptual representations, relates to perceptual representations of these expressions. A strong link was found which changed with the kind of expressions and the type of display. In probably the most extensive behavioural studies (with regard to the number of facial expressions used) to date, this thesis suggests that there are commonalities but also differences in processing of emotional and of other types of facial expressions. Thus, to understand facial expression processing, one needs to consider more than the 6 basic emotional expressions. 
These findings outline first steps towards a new domain in facial expression research, which has implications for a number of research and application fields where facial expressions play a role, ranging from social, developmental, and clinical psychology to computer vision and affective computing research.}, web_url = {http://www.logos-verlag.de/cgi-bin/buch/isbn/3969}, publisher = {Logos Verlag}, address = {Berlin, Germany}, series = {MPI Series in Biological Cybernetics ; 41}, state = {published}, ISBN = {978-3-8325-3969-6}, DOI = {10.15496/publikation-5336}, author = {Kaulard K{kascot}{Department Human Perception, Cognition and Action}} } @Inbook{ SchultzIP2014, title = {Applications of Information Theory to Analysis of Neural Data}, year = {2015}, pages = {199-203}, abstract = {Information theory is a practical and theoretical framework developed for the study of communication over noisy channels. Its probabilistic basis and capacity to relate statistical structure to function make it ideally suited for studying information flow in the nervous system. It has a number of useful properties: it is a general measure sensitive to any relationship, not only linear effects; it has meaningful units which in many cases allow direct comparison between different experiments; and it can be used to study how much information can be gained by observing neural responses in single trials, rather than in averages over multiple trials. A variety of information-theoretic quantities are commonly used in neuroscience – (see entry “Definitions of Information-Theoretic Quantities”). In this entry we review some applications of information theory in neuroscience to study encoding of information in both single neurons and neuronal populations.}, web_url = {http://link.springer.com/content/pdf/10.1007%2F978-1-4614-7320-6_280-1.pdf}, editor = {Jaeger, D. , R. Jung}, publisher = {Springer}, address = {New York, NY, USA}, booktitle = {Encyclopedia of Computational Neuroscience}, state = {published}, ISBN = {978-1-4614-6674-1}, DOI = {10.1007/978-1-4614-7320-6_280-1}, author = {Schultz SR; Ince RAA; Panzeri S{stefano}} } @Inbook{ InceSP2014, title = {Estimating Information-Theoretic Quantities}, year = {2015}, pages = {1137-1148}, abstract = {Information theory is a practical and theoretic framework developed for the study of communication over noisy channels. Its probabilistic basis and capacity to relate statistical structure to function make it ideally suited for studying information flow in the nervous system. It has a number of useful properties: it is a general measure sensitive to any relationship, not only linear effects; it has meaningful units which in many cases allow direct comparison between different experiments; and it can be used to study how much information can be gained by observing neural responses in single trials, rather than in averages over multiple trials. A variety of information-theoretic quantities are in common use in neuroscience (see entry “Summary of Information Theoretic Quantities”). Estimating these quantities in an accurate and unbiased way from real neurophysiological data frequently presents challenges, which are explained in this entry.}, web_url = {http://link.springer.com/content/pdf/10.1007%2F978-1-4614-7320-6_140-1.pdf}, editor = {Jaeger, D. , R. 
Jung}, publisher = {Springer}, address = {New York, NY, USA}, booktitle = {Encyclopedia of Computational Neuroscience}, state = {published}, ISBN = {978-1-4614-6674-1}, DOI = {10.1007/978-1-4614-6675-8_140}, author = {Ince RAA; Schultz SR; Panzeri S{stefano}} } @Inbook{ IncePS2014, title = {Summary of Information Theoretic Quantities}, year = {2015}, pages = {2924-2928}, abstract = {Information theory is a practical and theoretic framework developed for the study of communication over noisy channels. Its probabilistic basis and capacity to relate statistical structure to function make it ideally suited for studying information flow in the nervous system. As a framework, it has a number of useful properties: it provides a general measure sensitive to any relationship, not only linear effects; its quantities have meaningful units which, in many cases, allow a direct comparison between different experiments; and it can be used to study how much information can be gained by observing neural responses in single experimental trials rather than in averages over multiple trials. A variety of information theoretic quantities are in common use in neuroscience – including the Shannon entropy, Kullback–Leibler divergence, and mutual information. In this entry, we introduce and define these quantities. Further details on how these quantities can be estimated in practice are provided in the entry “Estimation of Information-Theoretic Quantities,” and examples of application of these techniques in neuroscience can be found in the entry “Applications of Information Theory to Analysis of Neural Data.”}, web_url = {http://link.springer.com/content/pdf/10.1007%2F978-1-4614-7320-6_306-1.pdf}, editor = {Jaeger, D. , R. Jung}, publisher = {Springer}, address = {New York, NY, USA}, booktitle = {Encyclopedia of Computational Neuroscience}, state = {published}, ISBN = {978-1-4614-6674-1}, DOI = {10.1007/978-1-4614-6675-8_306}, author = {Ince RAA; Panzeri S{stefano}; Schultz SR} } @Inbook{ BulthoffALB2015, title = {The Other-Race Effect Revisited: No Effect for Faces Varying in Race Only}, year = {2015}, pages = {153-165}, abstract = {The other-race effect refers to the observation that we perform better in tasks involving faces of our own race compared to faces of a race we are not familiar with. This is especially interesting as from a biological perspective, the category “race” does in fact not exist (Cosmides L, Tooby J, Krurzban R, Trends Cogn Sci 7(4):173–179, 2003); visually, however, we do group the people around us into such categories. Usually, the other-race effect is investigated in memory tasks where observers have to learn and subsequently recognize faces of individuals of different races (Meissner CA, Brigham JC, Psychol Public Policy Law 7(1):3–35, 2001) but it has also been demonstrated in perceptual tasks where observers compare one face to another on a screen (Walker PM, Tanaka J, Perception 32(9):1117–1125, 2003). In all tasks (and primarily for technical reasons) the test faces differ in race and identity. To broaden our general understanding of the effect that the race of a face has on the observer, in the present study, we investigated whether an other-race effect is also observed when participants are confronted with faces that differ only in ethnicity but not in identity. To that end, using Asian and Caucasian faces and a morph algorithm (Blanz V, Vetter T, A morphable model for the synthesis of 3D faces. 
In: Proceedings of the 26th annual conference on Computer graphics and interactive techniques – SIGGRAPH’99, pp 187–194, 1999), we manipulated each original Asian or Caucasian face to generate face “race morphs” that shared the same identity but whose race appearance was manipulated stepwise toward the other ethnicity. We presented each Asian or Caucasian face pair (original face and a race morph) to Asian (South Korea) and Caucasian (Germany) participants who had to judge which face in each pair looked “more Asian” or “more Caucasian”. In both groups, participants did not perform better for same-race pairs than for other-race pairs. These results point to the importance of identity information for the occurrence of an other-race effect.}, web_url = {http://link.springer.com/content/pdf/10.1007%2F978-94-017-7239-6_10.pdf}, editor = {Lee, S.-W. , H.H. Müller, K.-R. Müller}, publisher = {Springer}, address = {Dordrecht, The Netherlands}, series = {Trends in Augmentation of Human Performance ; 5}, booktitle = {Recent Progress in Brain and Cognitive Engineering}, state = {published}, ISBN = {978-94-017-7238-9}, DOI = {10.1007/978-94-017-7239-6_10}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Armann RGM{armann}{Department Human Perception, Cognition and Action}; Lee RK{ryokyung}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ ZhaoHB2014_2, title = {Holistic processing, contact, and the other-race effect in face recognition}, journal = {Vision Research}, year = {2014}, month = {12}, volume = {105}, pages = {61–69}, abstract = {Face recognition, holistic processing, and processing of configural and featural facial information are known to be influenced by face race, with better performance for own- than other-race faces. However, whether these various other-race effects (OREs) arise from the same underlying mechanisms or from different processes remains unclear. The present study addressed this question by measuring the OREs in a set of face recognition tasks, and testing whether these OREs are correlated with each other. Participants performed different tasks probing (1) face recognition, (2) holistic processing, (3) processing of configural information, and (4) processing of featural information for both own- and other-race faces. Their contact with other-race people was also assessed with a questionnaire. The results show significant OREs in tasks testing face memory and processing of configural information, but not in tasks testing either holistic processing or processing of featural information. Importantly, there was no cross-task correlation between any of the measured OREs. Moreover, the level of other-race contact predicted only the OREs obtained in tasks testing face memory and processing of configural information. 
These results indicate that these various cross-race differences originate from different aspects of face processing, contrary to the view that the ORE in face recognition is due to cross-race differences in terms of holistic processing.}, web_url = {http://www.sciencedirect.com/science/article/pii/S0042698914002144}, state = {published}, DOI = {10.1016/j.visres.2014.09.006}, author = {Zhao M{mzhao}{Department Human Perception, Cognition and Action}; Hayward WG; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Article{ LeeLLPCWBC2014, title = {Psychological distress and attentional bias toward acne lesions in patients with acne}, journal = {Psychology, Health & Medicine}, year = {2014}, month = {12}, volume = {19}, number = {6}, pages = {680-686}, abstract = {Acne vulgaris is a common inflammatory disease that manifests on the face and affects appearance. In general, facial acne has a wide-ranging negative impact on the psychosocial functioning of acne sufferers and leaves physical and emotional scars. In the present study, we investigated whether patients with acne vulgaris demonstrate enhanced psychological bias when assessing the attractiveness of faces with acne symptoms and whether they devote greater selective attention to acne lesions than do acne-free (control) individuals. Participants viewed images of faces under two different skin (acne vs. acne-free) and emotional facial expression (happy and neutral) conditions. They rated the attractiveness of the faces, and the time spent fixating on the acne lesions was recorded with an eye tracker. We found that the gap in perceived attractiveness between acne and acne-free faces was greater for acne sufferers. Furthermore, patients with acne fixated longer on facial regions exhibiting acne lesions than did control participants irrespective of the facial expression depicted. In summary, patients with acne have a stronger attentional bias for acne lesions and focus more on the skin lesions than do those without acne. 
Clinicians treating the skin problems of patients with acne should consider these psychological and emotional scars.}, web_url = {http://www.tandfonline.com/doi/abs/10.1080/13548506.2014.880493#.VCABIleaSQA}, state = {published}, DOI = {10.1080/13548506.2014.880493}, author = {Lee I-S; Lee A-R; Lee H; Park H-J; Chung S-Y; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Chae Y} } @Thesis{ Dobs2014, title = {Behavioral and Neural Mechanisms Underlying Dynamic Face Perception}, year = {2014}, month = {12}, state = {published}, type = {PhD}, author = {Dobs K{kdobs}{Department Human Perception, Cognition and Action}} } @Thesis{ Kaulard2014, title = {Visual perception of emotional and conversational facial expressions}, year = {2014}, month = {12}, state = {published}, type = {PhD}, author = {Kaulard K{kascot}{Department Human Perception, Cognition and Action}} } @Conference{ Bulthoff2014_10, title = {Faces, Face Categories and the Other-Race Effect}, year = {2014}, month = {11}, day = {30}, event_name = {IZN Retreat 2014 "Neuroscience Perspectives: Transition from Science to Industry and Back"}, event_place = {Kloster Schöntal, Germany}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Article{ EsinsSWB2014, title = {Do congenital prosopagnosia and the other-race effect affect the same face recognition mechanisms?}, journal = {Frontiers in Human Neuroscience}, year = {2014}, month = {9}, volume = {8}, number = {759}, pages = {1-14}, abstract = {Congenital prosopagnosia, an innate impairment in recognizing faces, as well as the other-race effect, a disadvantage in recognizing faces of foreign races, both affect face recognition abilities. Are the same face processing mechanisms affected in both situations? To investigate this question, we tested three groups of 21 participants: German congenital prosopagnosics, South Korean participants and German controls in three different tasks involving faces and objects. First we tested all participants on the Cambridge Face Memory Test in which they had to recognize Caucasian target faces in a 3-alternative-forced-choice task. German controls performed better than Koreans who performed better than prosopagnosics. In the second experiment, participants rated the similarity of Caucasian faces that differed parametrically in either features or second-order relations (configuration). Prosopagnosics were less sensitive to configuration changes than both other groups. In addition, while all groups were more sensitive to changes in features than in configuration, this difference was smaller in Koreans. In the third experiment, participants had to learn exemplars of artificial objects, natural objects, and faces and recognize them among distractors of the same category. Here prosopagnosics performed worse than participants in the other two groups only when they were tested on face stimuli. In sum, Koreans and prosopagnosic participants differed from German controls in different ways in all tests. This suggests that German congenital prosopagnosics perceive Caucasian faces differently than do Korean participants. 
Importantly, our results suggest that different processing impairments underlie the other-race effect and congenital prosopagnosia.}, web_url = {http://journal.frontiersin.org/Journal/10.3389/fnhum.2014.00759/pdf}, state = {published}, DOI = {10.3389/fnhum.2014.00759}, author = {Esins J{esins}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Article{ EsinsSBK2013, title = {Galactose uncovers face recognition and mental images in congenital prosopagnosia: The first case report}, journal = {Nutritional Neuroscience}, year = {2014}, month = {9}, volume = {17}, number = {5}, pages = {239-240}, abstract = {A woman in her early 40s with congenital prosopagnosia and attention deficit hyperactivity disorder observed for the first time sudden and extensive improvement of her face recognition abilities, mental imagery, and sense of navigation after galactose intake. This effect of galactose on prosopagnosia has never been reported before. Even if this effect is restricted to a subform of congenital prosopagnosia, galactose might improve the condition of other prosopagnosics. Congenital prosopagnosia, the inability to recognize other people by their face, has extensive negative impact on everyday life. It has a high prevalence of about 2.5%. Monosaccharides are known to have a positive impact on cognitive performance. Here, we report the case of a prosopagnosic woman for whom the daily intake of 5 g of galactose resulted in a remarkable improvement of her lifelong face blindness, along with improved sense of orientation and more vivid mental imagery. All these improvements vanished after discontinuing galactose intake. The self-reported effects of galactose were wide-ranging and remarkably strong but could not be reproduced for 16 other prosopagnosics tested. Indications about heterogeneity within prosopagnosia have been reported; this could explain the difficulty of finding similar effects in other prosopagnosics. Detailed analyses of the effects of galactose in prosopagnosia might give more insight into the effects of galactose on human cognition in general. Galactose is cheap and easy to obtain; therefore, a systematic test of its positive effects on other cases of congenital prosopagnosia may be warranted.}, web_url = {http://www.maneyonline.com/doi/abs/10.1179/1476830513Y.0000000091}, state = {published}, DOI = {10.1179/1476830513Y.0000000091}, author = {Esins J{esins}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Kennerknecht I} } @Inproceedings{ ScheerBC2014, title = {Is the novelty-P3 suitable for indexing mental workload in steering tasks?}, journal = {Cognitive Processing}, year = {2014}, month = {9}, volume = {15}, number = {Supplement 1}, pages = {S135-S137}, abstract = {Difficulties experienced in steering a vehicle can be expected to place a demand on one’s mental resources (O’Donnell, Eggemeier 1986). 
While the extent of this mental workload (MWL) can be estimated by self-reports (e.g., NASA-TLX; Hart, Staveland 1988), it can also be physiologically evaluated in terms of how a primary task taxes a common and limited pool of mental resources, to the extent that it reduces the electroencephalographic (EEG) responses to a secondary task (e.g. an auditory oddball task). For example, the participant could be primarily required to control a cursor to track a target while attending to a series of auditory stimuli, which would infrequently present target tones that should be responded to with a button-press (e.g., Wickens, Kramer, Vanasse and Donchin 1983). Infrequently presented targets, termed oddballs, are known to elicit a large positive potential approximately 300 ms after their presentation (i.e., P3). Indeed, increasing tracking difficulty either by decreasing the predictability of the tracked target or by changing the complexity of the controller dynamics has been shown to attenuate P3 responses in the secondary auditory monitoring task (Wickens et al. 1983; Wickens, Kramer and Donchin 1984). In contrast, increasing tracking difficulty—by introducing more frequent direction changes of the tracked target (i.e. including higher frequencies in the function that describes the motion trajectory of the target)—has been shown to bear little influence on the secondary task’s P3 response (Wickens, Israel and Donchin 1977; Isreal, Chesney, Wickens and Donchin 1980). Overall, the added requirement of a steering task consistently results in a lower P3 amplitude, relative to performing auditory monitoring alone (Wickens et al. 1983; Wickens et al. 1977; Isreal et al. 1980). Using a dual-task paradigm for indexing workload is not ideal. First, it requires participants to perform a secondary task. This prevents it from being applied in real-world scenarios; users cannot be expected to perform an unnecessary task that could compromise their critical work performance. Second, it can only be expected to work if the performance of the secondary task relies on the same mental resources as those of the primary task (Wickens, Yeh 1983), requiring a deliberate choice of the secondary task. Thus, it is fortunate that more recent studies have demonstrated that P3 amplitudes can be sensitive to MWL, even if the auditory oddball is ignored (Ullsperger, Freude and Erdmann 2001; Allison, Polich 2008). This effect is attributed to a momentary and involuntary shift in general attention, especially when recognizable sounds (e.g. a dog bark, as opposed to a pure tone) are used (Miller, Rietschel, McDonald and Hatfield 2011). The current work, containing two experiments, investigates the conditions that would allow the ‘novelty-P3’, the P3 elicited by the ignored, recognizable oddball, to be an effective index for the MWL of compensatory tracking. Compensatory tracking is a basic steering task that can be generalized to most implementations of vehicular control. In both experiments, participants were required to use a joystick to counteract disturbances of a horizontal plane. To evaluate the generalizability of this paradigm, we depicted this horizontal plane as either a line in a simplified visualization or as the horizon in a real-world environment. In the latter, participants experienced a large field-of-view perspective of the outside world from the cockpit of an aircraft that rotated erratically about its heading axis. The task was the same regardless of the visualization. 
In both experiments, we employed a full factorial design for the visualization (instrument, world) and 3 oddball paradigms (in experiment 1) or 4 levels of task difficulty (in experiment 2), respectively. Two sessions were conducted on separate days for the different visualizations, which were counter-balanced for order. Three trials were presented per oddball paradigm (experiment 1) or level of task difficulty (experiment 2) in blocks, which were randomized for order. Overall, we found that steering performance was worse when the visualization was provided by a realistic world environment in experiments 1 (F(1, 11) = 42.8, p < 0.01) and 2 (F(1, 13) = 35.0, p < 0.01). Nonetheless, this manipulation of visualization had no consequence for our participants’ MWL as evaluated by a post-experimental questionnaire (i.e., NASA-TLX) and EEG responses. This suggests that MWL was unaffected by our choice of visualization. The first experiment, with 12 participants, was designed to identify the optimal presentation paradigm of the auditory oddball. For the EEG analysis, two participants had to be excluded due to noisy electrophysiological recordings (more than 50 % of rejected epochs). Whilst performing the tracking task, participants were presented with a sequence of auditory stimuli that they were instructed to ignore. This sequence would, in the 1-stimulus paradigm, only contain the infrequent oddball stimulus (i.e., the familiar sound of a dog’s bark (Fabiani, Kazmerski, Cycowicz and Friedmann 1996)). In the 2-stimulus paradigm, this infrequently presented oddball (0.1) is accompanied by a more frequently presented pure tone (0.9), and in the 3-stimulus paradigm, the infrequently presented oddball (0.1) is accompanied by a more frequently presented pure tone (0.8) and an infrequently presented pure tone (0.1). These three paradigms are widely used in P3 research (Katayama, Polich 1996). It should be noted, however, that the target-to-target interval is 20 s regardless of the paradigm. To obtain the ERPs, epochs from 100 ms before to 900 ms after the onset of the recognizable oddball stimulus were averaged. Mean amplitude measurements were obtained in a 60 ms window, centered at the group-mean peak latency for the largest positive maximum component between 250 and 400 ms for the oddball P3, for each of the three mid-line electrode channels of interest (i.e., Fz, Cz, Pz). In agreement with previous work, the novelty-P3 response was smaller when participants had to perform the tracking task compared to when they were only presented with the task-irrelevant auditory stimuli, without the tracking task (F(1, 9) = 10.9, p < 0.01). However, the amplitude of the novelty-P3 differed significantly across the presentation paradigms (F(2, 18) = 5.3, p < 0.05), whereby the largest response to our task-irrelevant stimuli was elicited by the 1-stimulus oddball paradigm. This suggests that the 1-stimulus oddball paradigm is most likely to elicit novelty-P3s that are sensitive to changes in MWL. Finally, the attenuation of novelty-P3 amplitudes by the tracking task varied across the three mid-line electrodes (F(2, 18) = 28.0, p < 0.001). Pairwise comparisons, Bonferroni-corrected for multiple comparisons, revealed P3 amplitude to be largest at Cz, followed by Fz, and smallest at Pz (all p < 0.05). This stands in contrast with previous work that found control difficulty to attenuate P3 responses in parietal electrodes (cf., Isreal et al. 1980; Wickens et al. 1983). 
Thus, the current paradigm that uses a recognizable, ignored sound is likely to reflect an underlying process that is different from that of previous studies, one which could be more sensitive to the MWL demands of a tracking task. Given the result of experiment 1, the second experiment, with 14 participants, investigated whether the 1-stimulus oddball paradigm would be sufficiently sensitive to index tracking difficulty as defined by the bandwidth of frequencies that contributed to the disturbance of the horizontal plane (cf., Isreal et al. 1980). Three different bandwidth profiles (easy, medium, hard) defined the linear increase in the amount of disturbance that had to be compensated for. This manipulation was effective in increasing subjective MWL, according to the results of a post-experimental NASA-TLX questionnaire (F(2, 26) = 14.9, p < 0.001), and demonstrated the expected linear trend (F(1, 13) = 23.2, p < 0.001). This increase in control effort was also reflected in the amount of joystick activity, which grew linearly across the difficulty conditions (F(1, 13) = 42.2, p < 0.001). For the EEG analysis, two participants had to be excluded due to noisy electrophysiological recordings (more than 50 % of rejected epochs). A planned contrast revealed that the novelty-P3 was significantly lower in the most difficult condition compared to the baseline viewing condition, where no tracking was done (F(1, 11) = 5.2, p < 0.05; see Fig. 1a). Nonetheless, the novelty-P3 did not differ significantly between the difficulty conditions (F(2, 22) = 0.13, p = 0.88), nor did it show the expected linear trend (F(1, 11) = 0.02, p = 0.91). Like Isreal et al. (1980), we find that EEG responses do not discriminate the MWL that is associated with controlling increased disturbances. It remains to be investigated whether the novelty-P3 is sensitive to the complexity of controller dynamics, as has been shown for the P3. The power spectral density of the EEG data around 10 Hz (i.e., alpha) has been suggested by Smith and Gevins (2005) to index MWL. A post hoc analysis of our current data, at electrode Pz, revealed that alpha power was significantly lower for the medium and hard conditions, relative to the view-only condition (F(1, 11) = 6.081, p < 0.05; F(1, 11) = 6.282, p < 0.05). Nonetheless, the expected linear trend across tracking difficulty was not significant (Fig. 1b). To conclude, the current results suggest that a 1-stimulus oddball task ought to be preferred when measuring general MWL with the novelty-P3. Although changes in the novelty-P3 can identify the control effort required in our compensatory tracking task, it is not sufficiently sensitive to provide a graded response across different levels of disturbances. In this regard, it may not be as effective as self-reports and joystick activity in denoting control effort. 
Nonetheless, further research can improve upon the sensitivity of EEG metrics to MWL by investigating other aspects that better correlate with the specific demands of a steering task.}, web_url = {http://link.springer.com/content/pdf/10.1007%2Fs10339-014-0632-2.pdf}, editor = {Butz, M.V.}, publisher = {Springer}, address = {Berlin, Germany}, event_name = {12th Biannual Conference of the German Cognitive Science Society (KogWis 2014)}, event_place = {Tübingen, Germany}, state = {published}, DOI = {10.1007/s10339-014-0632-2}, author = {Scheer M{mscheer}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ GlatzBC2014, title = {Looming auditory warnings initiate earlier event-related potentials in a manual steering task}, journal = {Cognitive Processing}, year = {2014}, month = {9}, volume = {15}, number = {Supplement 1}, pages = {S38}, abstract = {Automated collision avoidance systems promise to reduce accidents and relieve the driver from the demands of constant vigilance. Such systems direct the operator’s attention to potentially critical regions of the environment without compromising steering performance. This raises the question: What is an effective warning cue? Sounds with rising intensities are claimed to be especially salient. By evoking the percept of an approaching object, they engage a neural network that supports auditory space perception and attention (Bach et al. 2008). Indeed, we are aroused by and faster to respond to ‘looming’ auditory tones, which increase heart rate and skin conductance activity (Bach et al. 2009). Looming sounds can differ in terms of their rising intensity profiles. While such a profile can be approximated by a sound whose amplitude increases linearly with time, an approaching object that emits a constant tone is better described as having an amplitude that increases exponentially with time. In a driving simulator study, warning cues that had a veridical looming profile induced earlier braking responses than ramped profiles with linearly increasing loudness (Gray 2011). In the current work, we investigated how looming sounds might serve, during a primary steering task, to alert participants to the appearance of visual targets. Nine volunteers performed a primary steering task whilst occasionally discriminating visual targets. Their primary task was to minimize the vertical distance between an erratically moving cursor and the horizontal mid-line, by steering a joystick towards the latter. Occasionally, diagonally oriented Gabor patches (10° tilt; 1° diameter; 3.1 cycles/deg; 70 ms duration) would appear on either the left or right of the cursor. Participants were instructed to respond with a button-press whenever a pre-defined target appeared. Seventy percent of the time, these visual stimuli were preceded by a 1,500 ms warning tone, 1,000 ms before they appeared. Overall, warning cues resulted in significantly faster and more sensitive detections of the visual target stimuli (F(1,8) = 7.72, p < 0.05; F(1,8) = 9.63, p < 0.05). Each trial would present one of three possible warning cues. Thus, the warning cue (2,000 Hz) could either be a constant tone at 65 dB, a ramped tone with linearly increasing intensity from 60 dB to approximately 75 dB, or a comparable looming tone with an exponentially increasing intensity profile. 
The different warning cues did not differ in their influence on response times to the visual targets or on recognition sensitivity (F(2,16) = 3.32, p = 0.06; F(2,16) = 0.10, p = 0.90). However, this might be due to our small sample size. It is noteworthy that the different warning tones did not adversely affect steering performance (F(2,16) = 1.65, p = 0.22). Nonetheless, electroencephalographic potentials to the offset of the warning cues were significantly earlier for the looming tone, compared to both the constant and ramped tones. More specifically, the positive component of the event-related potential was significantly earlier for the looming tone by about 200 ms, relative to the constant and ramped tones, and sustained for a longer duration (see Fig. 1). The current findings highlight the behavioral benefits of auditory warning cues. More importantly, we find that a veridical looming tone induces earlier event-related potentials than one with a linearly increasing intensity. Future work will investigate how this benefit might diminish with increasing time between the warning tone and the event that it cues.}, web_url = {http://link.springer.com/content/pdf/10.1007%2Fs10339-014-0632-2.pdf}, event_name = {12th Biannual Conference of the German Cognitive Science Society (KogWis 2014)}, event_place = {Tübingen, Germany}, state = {published}, DOI = {10.1007/s10339-014-0632-2}, author = {Glatz C{cglatz}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ SymeonidouOBC2014, title = {The Role of Direct Haptic Feedback in a Compensatory Tracking Task}, journal = {Cognitive Processing}, year = {2014}, month = {9}, volume = {15}, number = {Supplement 1}, pages = {S71}, abstract = {Haptic feedback systems can be designed to assist vehicular steering by sharing manual control with the human operator. For example, direct haptic feedback (DHF) forces, which are applied over the control device, can guide the operator towards an optimized trajectory, which he can either augment, comply with, or resist according to his preferences. DHF has been shown to improve performance (Olivari et al. submitted) and increase safety (Tsoi et al. 2010). Nonetheless, the human operator may not always benefit from the haptic support system. Depending on the amount of the haptic feedback, the operator might demonstrate an over-reliance or an opposition to this haptic assistance (Forsyth and MacLean 2006). Thus, it is worthwhile to investigate how different levels of haptic assistance influence shared control performance. The current study investigates how different gain levels of DHF influence performance in a compensatory tracking task. For this purpose, 6 participants were evenly divided into two groups according to their previous tracking experience. During the task, they had to compensate for externally induced disturbances that were visualized as the difference between a moving line and a horizontal reference standard. Briefly, participants observed how an unstable aircraft symbol, located in the middle of the screen, deviated in the roll axis from a stable artificial horizon. In order to compensate for the roll angle, participants were instructed to use the control joystick. Meanwhile, different DHF forces were presented over the control joystick for gain levels of 0, 12.5, 25, 50 and 100 %. 
The maximal DHF level was chosen according to the procedure described by Olivari et al. (2014) and represents the best stable performance of skilled human operators. The participants’ performance was defined as the reciprocal of the median of the root mean square error (RMSE) in each condition. Figure 1a shows that performance improved with increasing DHF gain, regardless of experience levels. To evaluate the operator’s contribution, relative to the DHF contribution, we calculated the ratio of overall performance to estimated DHF performance without human input. Figure 1b shows that the subjects’ contribution in both groups decreased with increasing DHF up to the 50 % condition. The contribution of experienced subjects plateaued between the 50 and 100 % DHF levels. Thus, the increase in performance for the 100 % condition can mainly be attributed to the higher DHF forces alone. In contrast, the inexperienced subjects seemed to rely completely on the DHF during the 50 % condition, since the operator’s contribution approximated 1. However, this changed for the 100 % DHF level. Here, the participants started to actively contribute to the task (operator’s contribution > 1). This change in behavior resulted in performance values similar to those of the experienced group. Our findings suggest that the increase of haptic support with our DHF system does not necessarily result in over-reliance and can improve performance for both experienced and inexperienced subjects.}, web_url = {http://link.springer.com/content/pdf/10.1007%2Fs10339-014-0632-2.pdf}, event_name = {12th Biannual Conference of the German Cognitive Science Society (KogWis 2014)}, event_place = {Tübingen, Germany}, state = {published}, DOI = {10.1007/s10339-014-0632-2}, author = {Symeonidou E-R{esymeonidou}; Olivari M{molivari}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Article{ ZhaoHB2014, title = {Face format at encoding affects the other-race effect in face memory}, journal = {Journal of Vision}, year = {2014}, month = {8}, volume = {14}, number = {9:6}, pages = {1-13}, abstract = {Memory of own-race faces is generally better than memory of other-race faces. This other-race effect (ORE) in face memory has been attributed to differences in contact, holistic processing, and motivation to individuate faces. Since most studies demonstrate the ORE with participants learning and recognizing static, single-view faces, it remains unclear whether the ORE can be generalized to different face learning conditions. Using an old/new recognition task, we tested whether face format at encoding modulates the ORE. The results showed a significant ORE when participants learned static, single-view faces (Experiment 1). In contrast, the ORE disappeared when participants learned rigidly moving faces (Experiment 2). Moreover, learning faces displayed from four discrete views produced the same results as learning rigidly moving faces (Experiment 3). Contact with other-race faces was correlated with the magnitude of the ORE. Nonetheless, the absence of the ORE in Experiments 2 and 3 cannot be readily explained by either more frequent contact with other-race faces or stronger motivation to individuate them. 
These results demonstrate that the ORE is sensitive to face format at encoding, supporting the hypothesis that the relative involvement of holistic and featural processing at encoding mediates the ORE observed in face memory.}, web_url = {http://www.journalofvision.org/content/14/9/6.full.pdf+html}, state = {published}, DOI = {10.1167/14.9.6}, author = {Zhao M{mzhao}{Department Human Perception, Cognition and Action}; Hayward WG; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Article{ WallravenBWvG2013, title = {The eyes grasp, the hands see: Metric category knowledge transfers between vision and touch}, journal = {Psychonomic Bulletin & Review}, year = {2014}, month = {8}, volume = {21}, number = {4}, pages = {976-985}, abstract = {Categorization of seen objects is often determined by the shapes of objects. However, shape is not exclusive to the visual modality: the haptic system is also expert at identifying shapes. Hence, an important question for understanding shape processing is whether humans store separate modality-dependent shape representations, or whether information is integrated into one multisensory representation. To answer this question, we created a metric space of computer-generated novel objects varying in shape. These objects were then printed using a 3-D printer, to generate tangible stimuli. In a categorization experiment, participants first explored the objects visually and haptically. We found that both modalities led to highly similar categorization behavior. Next, participants were trained either visually or haptically on shape categories within the metric space. As expected, visual training increased visual performance, and haptic training increased haptic performance. Importantly, however, we found that visual training also improved haptic performance, and vice versa. Two additional experiments showed that the location of the categorical boundary in the metric space also transferred across modalities, as did heightened discriminability of objects adjacent to the boundary. This observed transfer of metric category knowledge across modalities indicates that visual and haptic forms of shape information are integrated into a shared multisensory representation.}, web_url = {http://link.springer.com/content/pdf/10.3758%2Fs13423-013-0563-4.pdf}, state = {published}, DOI = {10.3758/s13423-013-0563-4}, author = {Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Waterkamp S{swaterka}{Department Human Perception, Cognition and Action}; van Dam L{vandam}{Research Group Multisensory Perception and Action}; Gaissert N{ninagaissert}{Department Human Perception, Cognition and Action}} } @Poster{ FademrechtBd2014_2, title = {A matter of perspective: action recognition depends on stimulus orientation in the periphery}, journal = {Perception}, year = {2014}, month = {8}, volume = {43}, number = {ECVP Abstract Supplement}, pages = {103}, abstract = {Recognizing actions of others in the periphery is required for fast and appropriate reactions to events in our environment (e.g. seeing kids running towards the street when driving). Previous results show that action recognition is surprisingly accurate even in the far periphery (≤60° visual angle (VA)) when actions were directed towards the observer (front view). The front view of a person is considered to be critical for social cognitive processes (Schilbach et al., 2013). 
To what degree does the orientation of the observed action (front vs. profile view) influence the identification of the action and the recognition of the action's valence across the horizontal visual field? Participants saw life-size stick figure avatars that carried out one of six motion-captured actions (greeting actions: handshake, hugging, waving; aggressive actions: slapping, punching and kicking). The avatar was shown on a large screen display at different positions up to 75° VA. Participants either assessed the emotional valence of the action or identified the action either as ‘greeting’ or as ‘attack’. Orientation had no significant effect on accuracy. Reaction times were significantly faster for profile than for front views (p = 0.003) for both tasks, which is surprising in light of recent suggestions}, web_url = {http://pec.sagepub.com/content/43/1_suppl.toc}, event_name = {37th European Conference on Visual Perception (ECVP 2014)}, event_place = {Beograd, Serbia}, state = {published}, DOI = {10.1177/03010066140430S101}, author = {Fademrecht L{lfademrecht}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}} } @Poster{ ZhaoB2014, title = {Face Race Affects Various Types of Face Processing, but Affects Them Differently}, journal = {Journal of Vision}, year = {2014}, month = {8}, volume = {14}, number = {10}, pages = {1262}, abstract = {Previous studies have shown that face race influences various aspects of face processing, including face identification (Meissner & Brigham, 2001), holistic processing (Michel et al., 2006), and processing of featural and configural information (Hayward et al., 2008). However, whether these various aspects of other-race effects (ORE) arise from the same underlying mechanism or from independent ones remains unclear. To address this question, we measured those manifestations of the ORE with different tasks, and tested whether the magnitudes of those OREs are related to each other. Each participant performed three tasks. (1) The original and a Chinese version of the Cambridge Face Memory Test (CFMT, Duchaine & Nakayama, 2006; McKone et al., 2012), which were used to measure the ORE in face memory. (2) A part/whole sequential matching task (Tanaka et al., 2004), which was used to measure the ORE in face perception and in holistic processing. (3) A scrambled/blurred face recognition task (Hayward et al., 2008), which was used to measure the ORE in featural and configural processing. We found a better recognition performance for own-race than other-race faces in all three tasks, confirming the existence of an ORE across various tasks. However, the size of the ORE measured in all three tasks differed; we found no correlation between the OREs in the three tasks. More importantly, the two measures of the ORE in configural and holistic processing tasks could not account for the individual differences in the ORE in face memory. These results indicate that although face race always influences face recognition as well as configural and featural processing, different underlying mechanisms are responsible for the occurrence of the ORE for each aspect of face processing tested here.}, web_url = {http://www.journalofvision.org/content/14/10/1262}, event_name = {14th Annual Meeting of the Vision Sciences Society (VSS 2014)}, event_place = {St. 
Pete Beach, FL, USA}, state = {published}, DOI = {10.1167/14.10.1262}, author = {Zhao M{mzhao}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ EsinsBS2014, title = {Facial motion does not help face recognition in congenital prosopagnosics}, journal = {Journal of Vision}, year = {2014}, month = {8}, volume = {14}, number = {10}, pages = {1436}, abstract = {Humans rely strongly on the shape of other people’s faces to recognize them. However, faces also change appearance between encounters, for example when people put on glasses or change their hairdo. This can affect face recognition in certain situations, e.g. when recognizing faces that we do not know very well or for congenital prosopagnosics. However, additional cues can be used to recognize faces: faces move as we speak, smile, or shift gaze, and this dynamic information can help to recognize other faces (Hill & Johnston, 2001). Here we tested if and to what extent such dynamic information can help congenital prosopagnosics to improve their face recognition. We tested 15 congenital prosopagnosics and 15 age- and gender-matched controls with a test created by Raboy et al. (2010). Participants learned 18 target identities and then performed an old/new judgment on the learned faces and 18 distractor faces. During the test phase, half the target faces exhibited everyday changes (e.g. modified hairdo, glasses added, etc.) while the other targets did not change. Crucially, half the faces were presented as short film sequences (dynamic stimuli) while the other half were presented as five random frames (static stimuli) during learning and test. Controls and prosopagnosics recognized identical targets better than changed ones. While controls recognized faces better in the dynamic than in the static condition, prosopagnosics’ performance was not better for dynamic compared to static stimuli. This difference between groups was significant. The absence of a dynamic advantage in prosopagnosics suggests that dysfunctions in congenital prosopagnosia might not be restricted only to ventral face-processing regions, but might also involve lateral temporal regions where facial motion is known to be processed (e.g. Haxby et al., 2000).}, web_url = {http://www.journalofvision.org/content/14/10/1436}, event_name = {14th Annual Meeting of the Vision Sciences Society (VSS 2014)}, event_place = {St. Pete Beach, FL, USA}, state = {published}, DOI = {10.1167/14.10.1436}, author = {Esins J{esins}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}} } @Poster{ FademrechtBd2014, title = {Influence of eccentricity on action recognition}, journal = {Journal of Vision}, year = {2014}, month = {8}, volume = {14}, number = {10}, pages = {1006}, abstract = {The recognition of actions is critical for human social functioning and provides insight into both the active and the inner states (e.g. valence) of another person. Although actions often appear in the visual periphery, little is known about action recognition beyond foveal vision. Related previous research showed that object recognition and object valence (i.e. positive or negative valence) judgments are relatively unaffected by presentations up to 13° visual angle (VA) (Calvo et al. 2010). 
This is somewhat surprising given that recognition performance for words and letters declines sharply in the visual periphery. Here, participants recognized an action and evaluated its valence as a function of eccentricity. We used a large screen display that allowed presentation of stimuli over a visual field from -60 to +60° VA. A life-size stick figure avatar carried out one of six motion-captured actions (3 positive actions: handshake, hugging, waving; 3 negative actions: slapping, punching and kicking). 15 participants assessed the valence of the action (positive or negative action) and another 15 participants identified the action (as fast and as accurately as possible). We found that reaction times increased with eccentricity to a similar degree for the valence and the recognition task. In contrast, accuracy declined significantly with eccentricity for both tasks but declined more sharply for the action recognition task. These declines were observed for eccentricities larger than 15° VA. Thus, we replicate the findings of Calvo et al. (2010) that recognition is little affected by extra-foveal presentations smaller than 15° VA. Yet, we additionally demonstrate that visual recognition performance for actions declined significantly at larger eccentricities. We conclude that large eccentricities are required to assess the effect of peripheral presentation on visual recognition.}, web_url = {http://www.journalofvision.org/content/14/10/1006}, event_name = {14th Annual Meeting of the Vision Sciences Society (VSS 2014)}, event_place = {St. Pete Beach, FL, USA}, state = {published}, DOI = {10.1167/14.10.1006}, author = {Fademrecht L{lfademrecht}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}} } @Poster{ ZhaoB2014_2, title = {Long-term memory for own- and other-race faces}, journal = {Perception}, year = {2014}, month = {8}, volume = {43}, number = {ECVP Abstract Supplement}, pages = {76}, abstract = {Many studies have demonstrated better recognition of own- than other-race faces. However, little is known about whether memories of unfamiliar own- and other-race faces decay similarly with time. We addressed this question by probing participants’ memory of own- and other-race faces both immediately after learning (immediate test) and one week later (delayed test). In both learning and test phases, participants saw a short movie wherein a person was talking in front of the camera (the sound was turned off). Two main results emerged. First, we observed a cross-race deficit in recognizing other-race faces in both immediate and delayed tests, but the cross-race deficit was reduced in the latter. Second, recognizing faces immediately after learning was not better than recognizing them one week later. Instead, overall performance was even better at delayed test than at immediate test. This result was mainly due to improved recognition for other-race female faces, which showed comparatively low performance when tested immediately. These results demonstrate that memories of both own- and other-race faces persist for a relatively long time. 
Although other-race faces are less well recognized than own-race faces, they seem to be maintained in long-term memory as well as, and even better than, own-race faces.}, web_url = {http://pec.sagepub.com/content/43/1_suppl.toc}, event_name = {37th European Conference on Visual Perception (ECVP 2014)}, event_place = {Beograd, Serbia}, state = {published}, DOI = {10.1177/03010066140430S101}, author = {Zhao M{mzhao}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Conference{ Chuang2014_2, title = {Seeking and Processing Information During Steering}, year = {2014}, month = {7}, day = {9}, web_url = {http://docplayer.net/15633537-Crc-940-related-lectures-and-talks-from-2012-to-2016.html}, event_name = {TUD Fachrichtung Psychologie: Bühler-Kolloquium}, event_place = {Dresden, Germany}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Article{ BrielmannBA2014, title = {Looking at faces from different angles: Europeans fixate different features in Asian and Caucasian faces}, journal = {Vision Research}, year = {2014}, month = {7}, volume = {100}, pages = {105–112}, abstract = {Race categorization of faces is a fast and automatic process and is known to affect further face processing profoundly and at the earliest stages. Whether processing of own- and other-race faces might rely on different facial cues, as indicated by diverging viewing behavior, is much under debate. We therefore aimed to investigate two open questions in our study: 1) Do observers consider information from distinct facial features informative for race categorization or do they prefer to gain global face information by fixating the geometrical center of the face? 2) Does the fixation pattern differ between own- and other-race faces, or, if facial features are considered relevant, do these features differ? We used eye tracking to test where European observers look when viewing Asian and Caucasian faces in a race categorization task. Importantly, in order to disentangle centrally located fixations from those towards individual facial features, we presented faces in frontal, half-profile and profile views. We found that observers showed no general bias towards looking at the geometrical center of faces, but rather directed their first fixations towards distinct facial features, regardless of face race. However, participants looked at the eyes more often in Caucasian faces than in Asian faces, and there were significantly more fixations to the nose for Asian compared to Caucasian faces. Thus, observers rely on information from distinct facial features rather than facial information gained by centrally fixating the face. To what extent specific features are looked at is determined by the face’s race.}, web_url = {http://www.sciencedirect.com/science/article/pii/S0042698914000984}, state = {published}, DOI = {10.1016/j.visres.2014.04.011}, author = {Brielmann AA{abrielmann}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Armann R{armann}{Department Human Perception, Cognition and Action}} } @Article{ DobsBBVCS2014, title = {Quantifying human sensitivity to spatio-temporal information in dynamic faces}, journal = {Vision Research}, year = {2014}, month = {7}, volume = {100}, pages = {78–87}, abstract = {A great deal of perceptual and social information is conveyed by facial motion.
Here, we investigated observers’ sensitivity to the complex spatio-temporal information in facial expressions and what cues they use to judge the similarity of these movements. We motion-captured four facial expressions and decomposed them into time courses of semantically meaningful local facial actions (e.g., eyebrow raise). We then generated approximations of the time courses which differed in the amount of information about the natural facial motion they contained, and used these and the original time courses to animate an avatar head. Observers chose which of two animations based on approximations was more similar to the animation based on the original time course. We found that observers preferred animations containing more information about the natural facial motion dynamics. To explain observers’ similarity judgments, we developed and used several measures of objective stimulus similarity. The time course of facial actions (e.g., onset and peak of eyebrow raise) explained observers’ behavioral choices better than image-based measures (e.g., optic flow). Our results thus revealed observers’ sensitivity to changes of natural facial dynamics. Importantly, our method allows a quantitative explanation of the perceived similarity of dynamic facial expressions, which suggests that sparse but meaningful spatio-temporal cues are used to process facial motion.}, web_url = {http://www.sciencedirect.com/science/article/pii/S0042698914000960}, state = {published}, DOI = {10.1016/j.visres.2014.04.009}, author = {Dobs K{kdobs}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Breidt M{mbreidt}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Curio C{curio}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}} } @Conference{ Chuang2014, title = {Understanding the Human Operator in Man-Machine Systems for Closed-Loop Control Behavior}, year = {2014}, month = {6}, day = {29}, web_url = {http://brain.korea.ac.kr/bce2014/?m=program}, event_name = {6th International Conference on Brain and Cognitive Engineering (BCE 2014)}, event_place = {Tübingen, Germany}, state = {published}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ BulthoffZ2014, title = {Perception of Race Information in Same-Race and Other-Race Faces}, year = {2014}, month = {6}, day = {28}, web_url = {http://tuebingen.mpg.de/startseite/detail/conference-on-brain-and-cognitive-engineering.html}, event_name = {6th International Conference on Brain and Cognitive Engineering (BCE 2014)}, event_place = {Tübingen, Germany}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Zhao M{mzhao}{Department Human Perception, Cognition and Action}} } @Article{ DavidSMSSMSVE2013, title = {Right Temporoparietal Gray Matter Predicts Accuracy of Social Perception in the Autism Spectrum}, journal = {Journal of Autism and Developmental Disorders}, year = {2014}, month = {6}, volume = {44}, number = {6}, pages = {1433-1446}, abstract = {Individuals with an autism spectrum disorder (ASD) show hallmark deficits in social perception. These difficulties might also reflect fundamental deficits in integrating visual signals. We contrasted predictions of a social perception and a spatial–temporal integration deficit account. 
Participants with ASD and matched controls performed two tasks: the first required spatiotemporal integration of global motion signals without social meaning, the second required processing of socially relevant local motion. The ASD group showed differences from controls only in social motion evaluation. In addition, gray matter volume in the temporal–parietal junction correlated positively with accuracy in social motion perception in the ASD group. Our findings suggest that social–perceptual difficulties in ASD cannot be reduced to deficits in spatial–temporal integration.}, web_url = {http://link.springer.com/content/pdf/10.1007%2Fs10803-013-2008-3.pdf}, state = {published}, DOI = {10.1007/s10803-013-2008-3}, author = {David N; Schultz J{johannes}{Department Human Perception, Cognition and Action}; Milne E; Schunke O; Sch\"ottle D; M\"unchau A; Siegel M; Vogeley K; Engel AK} } @Inproceedings{ FladNBC2014, title = {System Delay in Flight Simulators Impairs Performance and Increases Physiological Workload}, year = {2014}, month = {6}, pages = {3-11}, abstract = {Delays between user input and the system’s reaction in control tasks have been shown to have a detrimental effect on performance. This is often accompanied by increases in self-reported workload. In the current work, we sought to identify physiological measures that correlate with pilot workload in a conceptual aerial vehicle that suffered from varying time delays between control input and vehicle response. For this purpose, we measured the skin conductance and heart rate variability of 8 participants during flight maneuvers in a fixed-base simulator. Participants were instructed to land a vehicle while compensating for roll disturbances under different conditions of system delay. We found that control error and the self-reported workload increased with increasing time delay. Skin conductance and input behavior also reflected corresponding changes. Our results show that physiological measures are sufficiently robust for evaluating the adverse influence of system delays in a conceptual vehicle model.}, web_url = {http://link.springer.com/content/pdf/10.1007%2F978-3-319-07515-0_1.pdf}, editor = {Harris, D.}, publisher = {Springer}, address = {Berlin, Germany}, series = {Lecture Notes in Artificial Intelligence ; 8532}, booktitle = {Engineering Psychology and Cognitive Ergonomics}, event_name = {11th International Conference on Engineering Psychology and Cognitive Ergonomics (EPCE 2014)}, event_place = {Heraklion, Greece}, state = {published}, ISBN = {978-3-319-07514-3}, DOI = {10.1007/978-3-319-07515-0_1}, author = {Flad N{nflad}{Department Human Perception, Cognition and Action}; Nieuwenhuizen FM{fmnieuwenhuizen}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ ScheerNBC2014, title = {The Influence of Visualization on Control Performance in a Flight Simulator}, year = {2014}, month = {6}, pages = {202-211}, abstract = {Flight simulators are often assessed in terms of how well they imitate the physical reality that they endeavor to recreate. Given that vehicle simulators are primarily used for training purposes, it is equally important to consider the implications of visualization in terms of its influence on the user’s control performance.
In this paper, we report that a complex and realistic visual world environment can result in larger performance errors compared to a simplified, yet equivalent, visualization of the same control task. This is accompanied by an increase in subjective workload. A detailed analysis of control performance indicates that this is because error perception is more variable in a real-world environment.}, web_url = {http://link.springer.com/content/pdf/10.1007%2F978-3-319-07515-0_21.pdf}, editor = {Harris, D.}, publisher = {Springer}, address = {Berlin, Germany}, series = {Lecture Notes in Artificial Intelligence ; 8532}, booktitle = {Engineering Psychology and Cognitive Ergonomics}, event_name = {11th International Conference on Engineering Psychology and Cognitive Ergonomics (EPCE 2014), held as Part of HCI International 2014}, event_place = {Heraklion, Greece}, state = {published}, ISBN = {978-3-319-07514-3}, DOI = {10.1007/978-3-319-07515-0_21}, author = {Scheer M{mscheer}{Department Human Perception, Cognition and Action}; Nieuwenhuizen FM{fmnieuwenhuizen}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ FademrechtBd2014_3, title = {Peripheral Vision and Action Recognition}, year = {2014}, month = {6}, web_url = {http://brain.korea.ac.kr/bce2014/?m=program}, event_name = {6th International Conference on Brain and Cognitive Engineering (BCE 2014)}, event_place = {Tübingen, Germany}, state = {published}, author = {Fademrecht L{lfademrecht}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}} } @Poster{ ZhaoHB2014_3, title = {Race of Face Affects Various Face Processing Tasks Differently}, year = {2014}, month = {6}, web_url = {http://brain.korea.ac.kr/bce2014/?m=program}, event_name = {6th International Conference on Brain and Cognitive Engineering (BCE 2014)}, event_place = {Tübingen, Germany}, state = {published}, author = {Zhao M{mzhao}{Department Human Perception, Cognition and Action}; Hayward WG; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ KimSRWWB2014, title = {Supramodal Representations of Associated Emotions}, year = {2014}, month = {6}, web_url = {http://brain.korea.ac.kr/bce2014/?m=program}, event_name = {6th International Conference on Brain and Cognitive Engineering (BCE 2014)}, event_place = {Tübingen, Germany}, state = {published}, author = {Kim J{junsukkim}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}; Rohe T{trohe}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; Lee S-W{Lee}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ SymeonidouOBC2014_2, title = {The Role of Direct Haptic Feedback in a Compensatory Tracking Task}, year = {2014}, month = {6}, web_url = {http://brain.korea.ac.kr/bce2014/?m=program}, event_name = {6th International Conference on Brain and Cognitive Engineering (BCE 2014)}, event_place = {Tübingen, Germany}, state = {published}, author = {Symeonidou E-R{esymeonidou}; Olivari M{molivari}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human
Perception, Cognition and Action}} } @Conference{ ChuangFSNB2014, title = {Closed-loop control performance and workload in a flight simulator}, year = {2014}, month = {4}, day = {1}, volume = {56}, pages = {45}, abstract = {In closed-loop control tasks (e.g., flying), the human operator is required to continuously monitor visual feedback, so as to evaluate the consequence of his actions and to correct them according to his goal. A flight simulator environment allows us to evaluate the influence of control challenges such as visual feedback delays and control disturbances without endangering the human operator. In addition, a stable simulator environment allows for more robust eye-movement and physiological recordings, which would be difficult to obtain in an actual test-flight. Eye-movement recordings can reveal the aspects of visual information that are relied on for the execution of certain maneuvers. Meanwhile, electrophysiological recordings of heart-based and skin conductance activity as well as EEG can reflect aspects of operator workload. My talk will present work on how visual feedback visualization and latency influence both control performance and workload. This will exemplify how control behavior in a flight simulator differs from that of a comparable compensatory tracking task. In doing so, I will convey the benefits and technical challenges involved in performing behavioral studies in a fixed-base flight simulator that is suitable for evaluating closed-loop control performance, eye-movement behavior and physiological recordings.}, web_url = {https://www.teap.de/memory/TeaP_Abstracts_20140219.pdf}, event_name = {56th Conference of Experimental Psychologists (TeaP 2014)}, event_place = {Giessen, Germany}, state = {published}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Flad N{nflad}{Department Human Perception, Cognition and Action}; Scheer M{mscheer}{Department Human Perception, Cognition and Action}; Nieuwenhuizen FM{fmnieuwenhuizen}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ BrowatzkiBC2014, title = {A comparison of geometric- and regression-based mobile gaze-tracking}, journal = {Frontiers in Human Neuroscience}, year = {2014}, month = {4}, volume = {8}, number = {200}, pages = {1-12}, abstract = {Video-based gaze-tracking systems are typically restricted in terms of their effective tracking space. This constraint limits the use of eyetrackers in studying mobile human behavior. Here, we compare two possible approaches for estimating the gaze of participants who are free to walk in a large space whilst looking at different regions of a large display. Geometrically, we linearly combined eye-in-head rotations and head-in-world coordinates to derive a gaze vector and its intersection with a planar display, by relying on the use of a head-mounted eyetracker and body-motion tracker. Alternatively, we employed Gaussian process regression to estimate the gaze intersection directly from the input data itself. Our evaluation of both methods indicates that a regression approach can deliver comparable results to a geometric approach. The regression approach is favored, given that it has the potential for further optimization, provides confidence bounds for its gaze estimates and offers greater flexibility in its implementation.
Open-source software for the methods reported here is also provided for user implementation.}, web_url = {http://journal.frontiersin.org/Journal/10.3389/fnhum.2014.00200/abstract}, state = {published}, DOI = {10.3389/fnhum.2014.00200}, author = {Browatzki B{browatbn}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ Bulthoff2014, title = {Perception of race information in same-race and other-race faces}, year = {2014}, month = {3}, day = {28}, abstract = {We are very good at classifying familiar and unfamiliar faces in terms of their race or sex, but compared to the robust identification of familiar faces, discriminating unfamiliar faces, especially other-race faces, is more difficult (other-race effect). In this talk, I will present three studies investigating what is important in a face for race classification and person identification. First we investigated what gives a face its perceived ethnicity. To this end, mixed-race faces were created by embedding one facial feature (e.g. Caucasian mouth) into the face of the other ethnicity (e.g. Asian face). The perceived ethnicity of these mixed-race faces was assessed in a classification task. The eyes and the texture (skin) proved to be major determinants of ethnicity for Asian and Caucasian participants. Second, we examined what underlies the other-race effect. We dissociated ethnicity from identity information by creating Asian and Caucasian faces that shared the same identity (e.g. making a Caucasian face look more Asian), and tested the other-race effect while controlling identity-related facial information. Participants showed equal race discrimination performance for same- and other-race faces. Thus no other-race effect appeared when ethnicity was the only varying factor between the test faces, suggesting that the other-race effect cannot be attributed to face race per se. Finally, we tested what type of facial information is most relevant for the identification of familiar faces. We created both sex-morphs and identity-morphs of very familiar faces, and asked participants to pick the original familiar face among its sex- or identity-morphs. We found better performance for identity- than for sex-manipulated faces, indicating that sex-related facial information is represented less accurately than identity-related information.
The implications of these results for models of face representation will be discussed.}, note = {Announced as: What is important in a face for race classification and person identification?}, web_url = {https://www.sowi.uni-kl.de/fileadmin/wpsy/public/Mar28_Description_Wahrnehmung_2014.pdf}, event_name = {Technische Universität Kaiserslautern: Wahrnehmung - Public talk series}, event_place = {Kaiserslautern, Germany}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ FladC2014, title = {Setting up a high-fidelity flight simulator to study closed-loop control and physiological workload}, year = {2014}, month = {3}, web_url = {http://www.interdisciplinary-college.de/previous-iks?id=21}, event_name = {Interdisciplinary College: Cognition 3.0 - the social mind in the connected world (IK 2014)}, event_place = {Günne, Germany}, state = {published}, author = {Flad N{nflad}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ Bulthoff2013_10, title = {What is important in a face for race classification and person identification?}, year = {2013}, month = {12}, day = {12}, abstract = {We are very good at classifying familiar and unfamiliar faces in terms of their race or sex, but compared to the robust identification of familiar faces, discriminating unfamiliar faces, especially other-race faces, is more difficult (other-race effect). In this talk, I will present three studies investigating what is important in a face for race classification and person identification. First we investigated what gives a face its perceived ethnicity. To this end, mixed-race faces were created by embedding one facial feature (e.g. Caucasian mouth) into the face of the other ethnicity (e.g. Asian face). The perceived ethnicity of these mixed-race faces was assessed in a classification task. The eyes and the texture (skin) proved to be major determinants of ethnicity for Asian and Caucasian participants. Second, we examined what underlies the other-race effect. We dissociated ethnicity from identity information by creating Asian and Caucasian faces that shared the same identity (e.g. making a Caucasian face look more Asian), and tested the other-race effect while controlling identity-related facial information. Participants showed equal race discrimination performance for same- and other-race faces. Thus no other-race effect appeared when ethnicity was the only varying factor between the test faces, suggesting that the other-race effect cannot be attributed to face race per se. Finally, we tested what type of facial information is most relevant for the identification of familiar faces. We created both sex-morphs and identity-morphs of very familiar faces, and asked participants to pick the original familiar face among its sex- or identity-morphs. We found better performance for identity- than for sex-manipulated faces, indicating that sex-related facial information is represented less accurately than identity-related information.
The implications of these results for models of face representation will be discussed.}, web_url = {http://www.sowi.uni-kl.de/en/psychologie-ii/colloquium/}, event_name = {Technische Universität Kaiserslautern, Fachbereich Sozialwissenschaften: Forschungskolloquium WS 2013/14}, event_place = {Kaiserslautern, Germany}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Article{ MichelRBHV2013, title = {The contribution of shape and surface information in the other-race face effect}, journal = {Visual Cognition}, year = {2013}, month = {12}, volume = {21}, number = {9-10}, pages = {1202-1223}, abstract = {Faces from another race are generally more difficult to recognize than faces from one's own race. However, faces provide multiple cues for recognition and it remains unknown what the relative contributions of these cues are to this “other-race effect”. In the current study, we used three-dimensional laser-scanned head models, which allowed us to independently manipulate two prominent cues for face recognition: the facial shape morphology and the facial surface properties (texture and colour). In Experiment 1, Asian and Caucasian participants implicitly learned a set of Asian and Caucasian faces that had both shape and surface cues to facial identity. Their recognition of these encoded faces was then tested in an old/new recognition task. For these face stimuli, we found a robust other-race effect: Both groups were more accurate at recognizing own-race than other-race faces. Having established the other-race effect, in Experiment 2 we provided only shape cues for recognition and in Experiment 3 we provided only surface cues for recognition. Caucasian participants continued to show the other-race effect when only shape information was available, whereas Asian participants showed no effect. When only surface information was available, there was a weak pattern for the other-race effect in Asians. Performance was poor in this latter experiment, so this pattern needs to be interpreted with caution.
Overall, these findings suggest that Asian and Caucasian participants rely differently on shape and surface cues to recognize own-race faces, and that they continue to use the same cues for other-race faces, which may be suboptimal for these faces.}, web_url = {http://www.tandfonline.com/doi/abs/10.1080/13506285.2013.823141}, state = {published}, DOI = {10.1080/13506285.2013.823141}, author = {Michel C; Rossion B; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Hayward WG; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}} } @Conference{ ZhaoB2013_3, title = {The other-race effect in face recognition is sensitive to face format at encoding}, year = {2013}, month = {11}, day = {14}, web_url = {http://www.opam.net/opam2013/program.php}, event_name = {21st Annual Conference on Object Perception, Attention, and Memory (OPAM 2013)}, event_place = {Toronto, ON, Canada}, state = {published}, author = {Zhao M{mzhao}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ DobsSBG2013, title = {Attending to expression or identity of dynamic faces engages different cortical areas}, year = {2013}, month = {11}, day = {10}, volume = {43}, number = {186.03}, abstract = {Identity and facial expression of faces we interact with are represented as invariant and changeable aspects, respectively - what are the cortical mechanisms that allow us to selectively extract information about these two important cues? We had subjects attend to either identity or expression of the same dynamic face stimuli and decoded concurrently measured fMRI activity to ask whether distinct cortical areas were differentially engaged in these tasks. We measured fMRI activity (3x3x3mm, 34 slices, TR=1.5, 4T) from 6 human subjects (2 female) as they performed a change-detection task on dynamic face stimuli. At trial onset, a cue (letters ‘E’ or ‘I’) was presented (0.5s) which instructed subjects to attend to either the expression or the identity of animations of faces (8 presentations per trial of 2s movie clips depicting 1 of 2 facial identities expressing happiness or anger). Subjects were to report (by button press) changes in the cued dimension (these occurred in 20% of trials) and ignore changes in the uncued dimension. Subjects successfully attended to the cued dimension (mean d’=2.4 for cued and d’=-1.9 for uncued dimension), and sensitivity did not differ across tasks (F(1,10)=0.19, p>0.6). Subjects performed 18-20 7min scans (20 trials/scan in pseudorandom order) in 2 sessions. We built linear classifiers to decode the attended dimension. Face-sensitive areas were defined in separate localizer scans as clusters of voxels responding more to faces than to houses. To independently determine the voxels to be included in the analyses, we ran a task localizer in which 10s grey screen was alternated with 10s of stimuli+task. For each area, we selected the 100 voxels whose signal correlated best with task/no task alternations. BOLD signal in these voxels was averaged over 3-21s of each trial of the main experiment, concatenated across subjects and sessions and used to build the classifiers. We found that we could decode the attended dimension on cross-validated data from many visual cortical areas (percentage correct classifications: FFA: 68%, MT: 73%, OFA: 79%, STS: 68%, V1: 77%; p<0.05, permutation test). 
However, while ventral face-sensitive areas (OFA, FFA) showed larger BOLD signal during attention-to-identity than attention-to-expression trials (p<0.001, t-test), motion processing areas (MT, STS) showed the opposite effect (p<0.001, t-test). Our results suggest that attending to expression or identity engages areas involved in stimulus-specific processing of these two dimensions. Moreover, attending to expression encoded in facial motion recruits motion processing areas, while attending to face identity activates ventral face-sensitive areas.}, web_url = {http://www.sfn.org/annual-meeting/neuroscience-2013}, event_name = {43rd Annual Meeting of the Society for Neuroscience (Neuroscience 2013)}, event_place = {San Diego, CA, USA}, state = {published}, author = {Dobs K{kdobs}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Gardner JL} } @Conference{ Zhao2013, title = {The other-race effect in face recognition is sensitive to face format at encoding}, year = {2013}, month = {11}, day = {6}, abstract = {People recognize own-race faces better than those from other races. This other-race effect in face recognition has been attributed to differences in holistic processing (Michel et al., 2006; Tanaka et al., 2004), in contact (Hancock & Rhodes, 2008; Rhodes et al., 2009), and in the motivation to individualize faces (Hugenberg et al., 2010). Here I would like to present two studies that tested whether the other-race effect is dependent upon the relative engagement of holistic and feature processing at encoding. We manipulated face format at encoding so that holistic processing was either disrupted or completely removed. The results showed that the other-race effect observed under normal face encoding was either eliminated or reversed (i.e., an other-race advantage). These results provide strong support for an encoding-dependent account of the other-race effect, which might also underlie the effects of racial contact and face individualization on the other-race effect observed in prior research.}, web_url = {http://www.psychology.hku.hk/index.php?id=1454}, event_name = {The University of Hong Kong: Department of Psychology Seminar}, event_place = {Hong Kong, China}, state = {published}, author = {Zhao M{mzhao}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Inproceedings{ NieuwenhuizenCB2013, title = {myCopter: Enabling Technologies for Personal Aerial Transportation Systems: Project status after 2.5 years}, year = {2013}, month = {11}, pages = {1-3}, abstract = {Current means of transportation for daily commuting are reaching their limits during peak travel times, which results in wasted fuel and lost time and money. A recent study commissioned by the European Union considers a personal aerial transportation system (PATS) as a viable alternative for transportation to and from work. It also acknowledges that developing such a transportation system should not focus on designing a new flying vehicle for personal use, but instead on investigating issues surrounding the implementation of the transportation system itself. This is the aim of the European project myCopter: to determine the social and technological aspects needed to set up a transportation system based on personal aerial vehicles (PAVs).
The project focuses on three research areas: human-machine interfaces and training, automation technologies, and social acceptance. Our extended abstract for inclusion in the conference proceedings and our presentation will focus on the achievements during the first 2.5 years of the 4-year project. These include the development of an augmented dynamic model of a PAV with excellent handling qualities that are suitable for training purposes. The training requirements for novice pilots are currently under development. Experimental evaluations on haptic guidance and human-in-the-loop control tasks have allowed us to start implementing a haptic Highway-in-the-Sky display to support novice pilots and to investigate metrics for objectively determining workload using psychophysiological measurements. Within the project, developments for automation technologies have focused on vision-based algorithms. We have integrated such algorithms in the control and navigation architecture of unmanned aerial vehicles (UAVs). Detecting suitable landing spots from monocular camera images recorded in flight has proven to work reliably off-line, but further work is required to be able to use this approach in real time. Furthermore, we have built multiple low-cost UAVs and equipped them with radar sensors to test collision avoidance strategies in real flight. Such algorithms are currently under development and will take inspiration from crowd simulations. Finally, using technology assessment methodologies, we have assessed potential markets for PAVs and challenges for their integration into the current transportation system. This will lead to structured discussions on expectations and requirements of potential PAV users.}, file_url = {fileadmin/user_upload/files/publications/2013/HeliWorld-2013-Nieuwenhuizen.pdf}, publisher = {Airtec GmbH}, address = {Frankfurt a. Main, Germany}, event_name = {5. Internationale HELI World Konferenz "HELICOPTER Technologies", "HELICOPTER Operations" at the International Aerospace Supply Fair AIRTEC 2013}, event_place = {Frankfurt a.M., Germany}, state = {published}, ISBN = {978-3-942939-10-2}, author = {Nieuwenhuizen FM{fmnieuwenhuizen}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ Esins2013, title = {What is it like being face blind?}, year = {2013}, month = {10}, day = {2}, volume = {14}, pages = {10}, abstract = {Face blindness or “prosopagnosia” is a disorder that impairs face recognition. That means that people who are face blind cannot recognize another person by his or her face. For my PhD thesis I work with congenital prosopagnosics, i.e. people who are face blind from birth. During my work I tested many prosopagnosic participants and collected their reports and stories.
In my talk I will try to give a general idea of what it feels like to be prosopagnosic, what face blind people see when they look at faces, what the everyday challenges are, and what strategies they use to recognize their family members and friends nonetheless.}, web_url = {http://www.cin.uni-tuebingen.de/fileadmin/content/05_News_%26_Events/Conferences/Conference_130930_NeNa_2013.pdf}, event_name = {14th Conference of Junior Neuroscientists of Tübingen (NeNa 2013): Do the Results Justify the Methods?}, event_place = {Schramberg, Germany}, state = {published}, author = {Esins J{esins}{Department Human Perception, Cognition and Action}} } @Article{ BiegBBC2013, title = {Saccade reaction time asymmetries during task-switching in pursuit tracking}, journal = {Experimental Brain Research}, year = {2013}, month = {10}, volume = {230}, number = {3}, pages = {271-281}, abstract = {We investigate how smooth pursuit eye movements affect the latencies of task-switching saccades. Participants had to alternate their foveal vision between a continuous pursuit task in the display center and a discrete object discrimination task in the periphery. The pursuit task was either carried out by following the target with the eyes only (ocular) or by steering an on-screen cursor with a joystick (oculomanual). We measured participants’ saccadic reaction times (SRTs) when foveal vision was shifted from the pursuit task to the discrimination task and back to the pursuit task. Our results show asymmetries in SRTs depending on the movement direction of the pursuit target: SRTs were generally shorter in the direction of pursuit. Specifically, SRTs from the pursuit target were shorter when the discrimination object appeared in the motion direction. SRTs to pursuit were shorter when the pursuit target moved away from the current fixation location. This result was independent of the type of smooth pursuit behavior that was performed by participants (ocular/oculomanual). The effects are discussed with regard to asymmetries in attention and processes that suppress saccades at the onset of pursuit.}, web_url = {http://link.springer.com/content/pdf/10.1007%2Fs00221-013-3651-9.pdf}, state = {published}, DOI = {10.1007/s00221-013-3651-9}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Article{ ZhaoB2013_2, title = {The other-race effect in face recognition is sensitive to face format at encoding}, journal = {Visual Cognition}, year = {2013}, month = {10}, volume = {21}, number = {6}, pages = {722-725}, note = {21st Annual Meeting on Object Perception, Attention, and Memory (OPAM 2013)}, web_url = {http://www.tandfonline.com/doi/abs/10.1080/13506285.2013.844971#.UtaaUfsViQA}, state = {published}, DOI = {10.1080/13506285.2013.844971}, author = {Zhao M{mzhao}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ KaulardSBd2013, title = {How we evaluate what we see - the interplay between the perceptual and conceptual structure of facial expressions}, journal = {Perception}, year = {2013}, month = {8}, volume = {42}, number = {ECVP Abstract Supplement}, pages = {192}, abstract = {What do you have in mind when judging the similarity of two facial expressions?
This study investigates how facial expression attributes are linked to the perceived similarity of facial expressions. Participants were shown pictures and videos of 2 types of facial expressions: 6 emotional (e.g. happy) and 6 conversational (e.g. don’t understand) expressions. One group of participants was asked to rate several attributes of those expressions (e.g. “how much is the person in control of the situation”, “how much does the mouth move”). Another group rated the pairwise similarity of the expressions. We explored the link between attribute ratings and perceived similarity of expressions using multiple regression analysis. The analysis revealed that different attributes best predicted the similarity ratings of pictures and videos of both facial expression types, suggesting different evaluation strategies. To rule out the possibility that representational spaces based on expression attributes are different across pictures and videos of both expression types, principal component analysis (PCA) was applied. Significant correlations between all PCA results suggest that those representations are similar. In sum, our study suggests different evaluative strategies for pairwise similarity judgments of pictures and videos of emotional and conversational expressions, despite similar representational spaces for these stimuli.}, web_url = {http://pec.sagepub.com/content/42/1_suppl.toc}, event_name = {36th European Conference on Visual Perception (ECVP 2013)}, event_place = {Bremen, Germany}, state = {published}, DOI = {10.1177/03010066130420S101}, author = {Kaulard K{kascot}{Department Human Perception, Cognition and Action}; Schultz JW{johannes}{Department Human Perception, Cognition and Action}; B\"ulthoff H{hhb}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}} } @Poster{ ZhaoB2013, title = {Learning Faces from Multiple Viewpoints Eliminates the Other-Race Effect}, journal = {Perception}, year = {2013}, month = {8}, volume = {42}, number = {ECVP Abstract Supplement}, pages = {204}, abstract = {People recognize own-race faces more accurately than those of other races. This other-race effect (ORE) has been frequently observed when faces are learned from static, single-view images. However, single-view face learning may prevent the acquisition of useful information (e.g., 3D face shape) for recognizing unfamiliar, other-race faces. Here we tested whether learning faces from multiple viewpoints reduces the ORE. In Experiment 1 participants learned faces from a single viewpoint (left or right 15° view) and were tested with the front view (0° view) using an old/new recognition task. They showed better recognition performance for own-race faces than for other-race faces, demonstrating the ORE in face recognition across viewpoints. In Experiment 2 participants learned each face from four viewpoints (in order, left 45°, left 15°, right 15°, and right 45° views) and were tested in the same way as in Experiment 1. Participants recognized own- and other-race faces equally well, eliminating the ORE.
These results suggest that learning faces from multiple viewpoints improves the recognition of other-race faces more than that of own-race faces, and that the previously observed ORE is caused in part by the non-optimal encoding condition for other-race faces.}, web_url = {http://pec.sagepub.com/content/42/1_suppl.toc}, event_name = {36th European Conference on Visual Perception (ECVP 2013)}, event_place = {Bremen, Germany}, state = {published}, DOI = {10.1177/03010066130420S101}, author = {Zhao M{mzhao}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ BrielmannBA2013, title = {Looking at faces from different angles: Europeans fixate different features in Asian and Caucasian faces}, journal = {Perception}, year = {2013}, month = {8}, volume = {42}, number = {ECVP Abstract Supplement}, pages = {204}, abstract = {The other-race effect is the widely known difficulty in recognizing faces of another race. Further, it has been clearly established in eye-tracking studies that observers of different cultural backgrounds exhibit different viewing strategies. Whether those viewing strategies also depend on the type of faces shown (same-race vs. other-race faces) is under much debate. Using eye tracking, we investigated whether European observers look at different facial features when viewing Asian and Caucasian faces in a face race categorization task. Additionally, to investigate the influence of viewpoints on gaze patterns, we presented faces in frontal, half profile and profile views. Even though fixation patterns generally changed across views, fixations to the eyes were more frequent for Caucasian faces and fixations to the nose were more frequent for Asian faces, independent of face orientation. In contrast, how fixations to cheeks, mouth and outline regions changed according to the face’s race was also dependent on face orientation. In sum, our results indicate that we mainly look at prominent facial features, although which features are fixated most often critically depends on face race and orientation.}, web_url = {http://pec.sagepub.com/content/42/1_suppl.toc}, event_name = {36th European Conference on Visual Perception (ECVP 2013)}, event_place = {Bremen, Germany}, state = {published}, DOI = {10.1177/03010066130420S101}, author = {Brielmann A{abrielmann}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Armann R{armann}{Department Human Perception, Cognition and Action}} } @Poster{ DobsBBVCS2013, title = {Quantifying Human Sensitivity to Spatio-Temporal Information in Dynamic Faces}, journal = {Perception}, year = {2013}, month = {8}, volume = {42}, number = {ECVP Abstract Supplement}, pages = {197}, abstract = {A great deal of social information is conveyed by facial motion. However, understanding how observers use the natural timing and intensity information conveyed by facial motion is difficult because of the complexity of these motion cues. Here, we systematically manipulated animations of facial expressions to investigate observers’ sensitivity to changes in facial motion. We filmed and motion-captured four facial expressions and decomposed each expression into time courses of semantically meaningful local facial actions (e.g., eyebrow raise). These time courses were used to animate a 3D head model with either the original time courses or approximations of them.
We then tested observers’ perceptual sensitivity to these changes using matching-to-sample tasks. When viewing two animations (original vs. approximation), observers chose the original animations as most similar to the video of the expression. In a second experiment, we used several measures of stimulus similarity to explain observers’ choice of which approximation was most similar to the original animation when viewing two different approximations. We found that high-level cues about spatio-temporal characteristics of facial motion (e.g., onset and peak of eyebrow raise) best explained observers’ choices. Our results demonstrate the usefulness of our method and, importantly, reveal observers’ sensitivity to natural facial dynamics.}, web_url = {http://pec.sagepub.com/content/42/1_suppl.toc}, event_name = {36th European Conference on Visual Perception (ECVP 2013)}, event_place = {Bremen, Germany}, state = {published}, DOI = {10.1177/03010066130420S101}, author = {Dobs K{kdobs}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Breidt M{mbreidt}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Curio C{curio}{Department Human Perception, Cognition and Action}; Schultz JW{johannes}{Department Human Perception, Cognition and Action}} } @Poster{ SchultzBK2013, title = {Signs of predictive coding in dynamic facial expression processing}, journal = {Perception}, year = {2013}, month = {8}, volume = {42}, number = {ECVP Abstract Supplement}, pages = {55}, abstract = {Processing social information contained in facial motion is likely to involve neural mechanisms in hierarchically organized brain regions. To investigate processing of facial expressions, we acquired functional magnetic resonance imaging data from 11 participants observing videos of 12 facial expressions. Stimuli were presented upright (clearly perceivable social information) and upside-down (disrupted social information). We assessed the amount of information contained in the brain activation patterns evoked by these expressions with multivariate searchlight analyses. We found reliable above-chance decoding performance for upright stimuli only in the left superior temporal sulcus region (STS) and for inverted stimuli only in the early visual cortex (group effects, corrected for family-wise errors resulting from multiple comparisons across gray matter voxels). Predictive coding proposes that inferences from high-level areas are subtracted from incoming sensory information in lower-level areas through feedback. Accordingly, we propose that upright stimuli activate representations of facial expressions in STS, which induces feedback to early visual areas and reduces processing in those regions. In contrast, we propose that upside-down stimuli fail to activate representations in STS and thus are processed longer in early visual cortex.
Predictive coding might prove a useful framework for studying the network of brain regions processing social information.}, web_url = {http://pec.sagepub.com/content/42/1_suppl.toc}, event_name = {36th European Conference on Visual Perception (ECVP 2013)}, event_place = {Bremen, Germany}, state = {published}, DOI = {10.1177/03010066130420S101}, author = {Schultz JW{johannes}{Department Human Perception, Cognition and Action}; B\"ulthoff H{hhb}{Department Human Perception, Cognition and Action}; Kaulard K{kascot}{Department Human Perception, Cognition and Action}} } @Inproceedings{ ChuangNB2013, title = {A Fixed-Based Flight Simulator Study: The Interdependence of Flight Control Performance and Gaze Efficiency}, year = {2013}, month = {7}, pages = {95-104}, abstract = {Here, a descriptive study is reported that addresses the relationship between flight control performance and instrument scanning behavior. This work was performed in a fixed-base flight simulator. It targets the ability of untrained novices to pilot a lightweight rotorcraft in a flight scenario that consisted of fundamental mission task elements such as speed and altitude changes. The results indicate that better control performance occurs when gaze is more selective for and focused on key instruments. Ideal instrument scanning behavior is proposed and its relevance for training instructions and visual instrument design is discussed.}, file_url = {fileadmin/user_upload/files/publications/2013/HCI-I-2013-Chuang.pdf}, web_url = {http://link.springer.com/content/pdf/10.1007%2F978-3-642-39354-9.pdf}, editor = {Harris, D.}, publisher = {Springer}, address = {Berlin, Germany}, series = {Lecture Notes in Computer Science ; 8020}, booktitle = {Engineering Psychology and Cognitive Ergonomics: Applications and Services}, event_name = {10th International Conference EPCE 2013, Held as Part of HCI International 2013}, event_place = {Las Vegas, NV, USA}, state = {published}, ISBN = {978-3-642-39353-2}, DOI = {10.1007/978-3-642-39354-9_11}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Nieuwenhuizen FM{fmnieuwenhuizen}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ BiegBC2013, title = {Attentional Biases during Steering Behavior}, year = {2013}, month = {7}, pages = {21-27}, abstract = {In the current study, we examine eye movements of human operators during a combined steering and discrimination task. In this task, observers had to alternate their gaze between a central steering task and a discrimination task in the periphery. Our results show that the observer’s gaze behavior is influenced by the motion direction of the steering task. Saccade reaction times (SRTs) to the discrimination target were shorter if the target appeared in the steering direction. SRTs back to the steering task were shorter when the steering target moved away from the discrimination target.
These effects are likely the result of motion-related attention shifts and an interaction of the saccadic and smooth pursuit eye movement systems.}, web_url = {http://link.springer.com/content/pdf/10.1007%2F978-3-642-39173-6.pdf}, editor = {Duffy, V.G.}, publisher = {Springer}, address = {Berlin, Germany}, series = {Lecture Notes in Computer Science ; 8020}, booktitle = {Digital Human Modeling and Applications in Health, Safety, Ergonomics, and Risk Management: Healthcare and Safety of the Environment and Transport}, event_name = {4th International Conference DHM 2013, Held as Part of HCI International 2013}, event_place = {Las Vegas, NV, USA}, state = {published}, ISBN = {978-3-642-39172-9}, DOI = {10.1007/978-3-642-39173-6_3}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ JungBTLA2013, title = {The Role of Race in Summary Representations of Faces}, journal = {Journal of Vision}, year = {2013}, month = {7}, volume = {13}, number = {9}, pages = {861}, abstract = {One possibility to overcome the processing limitation of the visual system is to attend selectively to relevant information only. Another strategy is to process sets of objects as ensembles and represent their average characteristics instead of individual group members (e.g., mean size, brightness, orientation). Recent evidence suggests that ensemble representation might occur even for human faces (for a summary, see Alvarez, 2011), i.e., observers can extract the mean emotion, sex, and identity from a set of faces (Haberman & Whitney, 2007; de Fockert & Wolfenstein, 2009). Here, we extend this line of research into the realm of face race: Can we extract the "mean race" of a set of faces when no conscious perception of single individuals is possible? Moreover, does the visual system process own- and other-race faces differently at this stage? Face stimuli had the same (average) male identity but were morphed, at different levels, between Asian and Caucasian appearance. Following earlier studies (e.g., Haberman & Whitney, 2007, 2010), observers were briefly (250 ms) presented with random sets of 12 of these faces. They were then asked to adjust a test face to the perceived mean race of the set by "morphing" it between Asian and Caucasian appearance. The results show that for most participants the response error distribution is significantly different from random, while their responses are centered around the real stimulus set mean - suggesting that they are able to extract "mean race". Also, we find a bias towards responding more "Asian" than the actual mean of a face set. All participants tested so far are South Korean (from Seoul), indicating that even at this early (unconscious) processing stage, the visual system distinguishes between own- and other-race faces, giving more weight to the former.
Follow-up experiments on Caucasian participants will be performed to validate this observation.}, web_url = {http://www.journalofvision.org/content/13/9/861.short}, event_name = {13th Annual Meeting of the Vision Sciences Society (VSS 2013)}, event_place = {Naples, FL, USA}, state = {published}, DOI = {10.1167/13.9.861}, author = {Jung W-M{wjung}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Thornton I{ian}{Department Human Perception, Cognition and Action}; Lee S-W; Armann R{armann}{Department Human Perception, Cognition and Action}} } @Conference{ Bulthoff2013_6, title = {Wie viel Wahrheit steckt in der Wahrnehmung? / Quelle vérité se cache dans la perception?}, year = {2013}, month = {5}, day = {31}, event_name = {Symposium der Schweizerischen Gesellschaft für Histologietechnik}, event_place = {Bern, Switzerland}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Article{ BonevCE2012, title = {How do image complexity, task demands and looking biases influence human gaze behavior?}, journal = {Pattern Recognition Letters}, year = {2013}, month = {5}, volume = {34}, number = {7}, pages = {723–730}, abstract = {In this paper we propose an information-theoretic approach to understand eye-movement patterns, in relation to the task performed and image complexity. We commence with the analysis of the distributions and amplitudes of eye-movement saccades, performed across two different image-viewing tasks: free viewing and visual search. Our working hypothesis is that the complexity of image information and task demands should interact. This should be reflected in the Markovian pattern of short and long saccades. We compute high-order Markovian models of performing a large saccade after many short ones and also propose a novel method for quantifying image complexity. The analysis of the interaction between high-order Markovianity, task and image complexity supports our hypothesis.}, web_url = {http://www.sciencedirect.com/science/article/pii/S0167865512001687}, state = {published}, DOI = {10.1016/j.patrec.2012.05.007}, author = {Bonev B; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Escolano F} } @Article{ SchultzBBP2012, title = {What the Human Brain Likes About Facial Motion}, journal = {Cerebral Cortex}, year = {2013}, month = {5}, volume = {23}, number = {5}, pages = {1167-1178}, abstract = {Facial motion carries essential information about other people's emotions and intentions. Most previous studies have suggested that facial motion is mainly processed in the superior temporal sulcus (STS), but several recent studies have also shown involvement of ventral temporal face-sensitive regions. Up to now, it is not known whether the increased response to facial motion is due to an increased amount of static information in the stimulus, to the deformation of the face over time, or to increased attentional demands. We presented nonrigidly moving faces and control stimuli to participants performing a demanding task unrelated to the face stimuli. We manipulated the amount of static information by using movies with different frame rates. The fluidity of the motion was manipulated by presenting movies with frames either in the order in which they were recorded or in scrambled order. Results confirm higher activation for moving compared with static faces in STS and under certain conditions in ventral temporal face-sensitive regions. 
Activation was maximal at a frame rate of 12.5 Hz and smaller for scrambled movies. These results indicate that both the amount of static information and the fluid facial motion per se are important factors for the processing of dynamic faces.}, file_url = {fileadmin/user_upload/files/publications/2012/Cerebral-Cortex-2012-Schultz.pdf}, web_url = {http://cercor.oxfordjournals.org/content/23/5/1167.full.pdf+html}, state = {published}, DOI = {10.1093/cercor/bhs106}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; Brockhaus M{mabrockhaus}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Pilz K{kpilz}{Department Human Perception, Cognition and Action}} } @Article{ SonFLKBR2012, title = {Human-Centered Design and Evaluation of Haptic Cueing for Teleoperation of Multiple Mobile Robots}, journal = {IEEE Transactions on Cybernetics}, year = {2013}, month = {4}, volume = {43}, number = {2}, pages = {597-609}, abstract = {In this paper, we investigate the effect of haptic cueing on a human operator's performance in the field of bilateral teleoperation of multiple mobile robots, particularly multiple unmanned aerial vehicles (UAVs). Two aspects of human performance are deemed important in this area, namely, the maneuverability of mobile robots and the perceptual sensitivity of the remote environment. We introduce metrics that allow us to address these aspects in two psychophysical studies, which are reported here. Three fundamental haptic cue types were evaluated. The Force cue conveys information on the proximity of the commanded trajectory to obstacles in the remote environment. The Velocity cue represents the mismatch between the commanded and actual velocities of the UAVs and can implicitly provide a rich amount of information regarding the actual behavior of the UAVs. Finally, the Velocity+Force cue is a linear combination of the two. Our experimental results show that, while maneuverability is best supported by the Force cue feedback, perceptual sensitivity is best served by the Velocity cue feedback. In addition, we show that large gains in the haptic feedback do not always guarantee an enhancement in the teleoperator's performance.}, file_url = {fileadmin/user_upload/files/publications/2012/2013a-SonFraChuKimBueRob.pdf}, web_url = {http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6294459}, state = {published}, DOI = {10.1109/TSMCB.2012.2212884}, author = {Son HI{chakurt}{Department Human Perception, Cognition and Action}; Franchi A{antonio}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Kim J{junsukkim}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Robuffo Giordano P{robu_pa}{Department Human Perception, Cognition and Action}} } @Article{ SchultzB2013, title = {Parametric animacy percept evoked by a single moving dot mimicking natural stimuli}, journal = {Journal of Vision}, year = {2013}, month = {3}, volume = {13}, number = {4:15}, pages = {1-19}, abstract = {Identifying moving things in the environment is a priority for animals as these could be prey, predators, or mates. When the shape of a moving object is hard to see, motion becomes an important cue to distinguish animate from inanimate things.
We report a new stimulus in which a single moving dot evokes a reasonably strong percept of animacy by mimicking the motion of naturally occurring stimuli, with minimal context information. Stimulus movements are controlled by an equation such that changes in a single movement parameter lead to gradual changes in animacy judgments with minimal changes in low-level stimulus properties. An infinite number of stimuli can be created between the animate and inanimate extremes. A series of experiments confirms the strength of the percept and shows that observers tend to follow the stimulus with their eye gaze. However, eye movements are not necessary for perceptual judgments, as forced fixation on the display center only slightly reduces the amplitude of percept changes. Withdrawing attentional resources from the animacy judgment using a simultaneous secondary task further reduces percept amplitudes without abolishing them. This stimulus could open new avenues for the principled study of animacy judgments based on object motion only.}, web_url = {http://www.journalofvision.org/content/13/4/15.full.pdf+html}, state = {published}, DOI = {10.1167/13.4.15}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ WallravenD2013, title = {Visual experience is necessary for efficient haptic face recognition}, journal = {NeuroReport}, year = {2013}, month = {3}, volume = {24}, number = {5}, pages = {254–258}, abstract = {Humans are experts at face processing - this expertise develops over the course of several years, given visual input about faces from infancy. Recent studies have shown that individuals can also recognize faces haptically, albeit at lower performance than visually. Given that blind individuals are extensively trained on haptic processing, one may expect them to perform better at recognizing faces from touch than sighted individuals. Here, we tested this hypothesis using matched groups of sighted, congenitally blind, and acquired-blind individuals. Surprisingly, we found little evidence for a performance benefit for blind participants compared with sighted controls. Moreover, the congenitally blind group performed significantly worse than both the sighted and the acquired-blind group.
Our results are consistent with the hypothesis that visual expertise may be necessary for haptic face recognition; hence, even extensive haptic training cannot easily compensate for deficits in visual processing.}, web_url = {http://journals.lww.com/neuroreport/pages/articleviewer.aspx?year=2013&issue=03270&article=00010&type=abstract}, state = {published}, DOI = {10.1097/WNR.0b013e32835f00c0}, author = {Wallraven C{walli}{Department Human Perception, Cognition and Action}; Dopjans L{ldopjans}{Department Human Perception, Cognition and Action}} } @Poster{ BiegCBB2013, title = {Asymmetric saccade initiation at smooth pursuit onset}, year = {2013}, month = {1}, web_url = {http://www.fh-ooe.at/kongresswesen/konferenzen-kongresse/2013/23rd-oculomotor-meeting-2013/}, event_name = {23rd Oculomotor Meeting}, event_place = {Linz, Austria}, state = {published}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ Chuang2012, title = {Active Information Retrieval in Scene Perception and Object Learning}, year = {2012}, month = {11}, day = {2}, abstract = {We pick out task-relevant information from the visual scene by moving our eyes and confidently manipulate our near-environment to achieve our goals. A better understanding of human behavior can be achieved by adopting this perspective. That is, humans are active (not passive) observers. In my talk, I will address how we characterize natural information-seeking behavior in human participants in two contexts: a) scene processing, b) object learning. The first addresses how unrestrained gaze behavior can be characterized in terms of the information that is available in the scene. Here, I will explain why and how we eschew pure bottom-up procedures of using low-level image statistics to predict gaze movements. Next, I will discuss how we select which views of unfamiliar objects to learn, when we are free to manipulate them in 3D.}, event_name = {Department of Cognitive Neuroscience: Duke-NUS Graduate Medical School}, event_place = {Singapore}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ EsinsSKWB2012_2, title = {Comparing the other race effect and congenital prosopagnosia using a three-experiment test battery}, year = {2012}, month = {11}, volume = {13}, pages = {38}, abstract = {Congenital prosopagnosia, an innate impairment in recognizing faces, as well as the other-race effect, the disadvantage in recognizing faces of foreign races, both influence face recognition abilities. Here we compared both phenomena by testing three groups: German congenital prosopagnosics (cPs), unimpaired German and unimpaired South Korean participants (n=23 per group), on three tests with Caucasian faces. First we ran the Cambridge Face Memory Test (Duchaine & Nakayama, 2006 Neuropsychologia 44 576-585). Participants had to recognize Caucasian target faces in a 3AFC task. German controls performed better than Koreans (p=0.009), who performed better than prosopagnosics (p=0.0001). Variation of the individual performances was larger for cPs than for Koreans (p = 0.028). In the second experiment, participants rated the similarity of Caucasian faces (in-house 3D face-database) which differed parametrically in features or second order relations (configuration).
We found differences between sensitivities to change type (featural or configural, p=0) and between groups (p=0.005) and an interaction between both factors (p = 0.019). During the third experiment, participants had to learn exemplars of artificial objects (greebles), natural objects (shells), and faces and recognize them among distractors. The results showed an interaction (p = 0.005) between stimulus type and participant group: cPs were better for non-face stimuli and worse for face stimuli than the other groups. Our results suggest that congenital prosopagnosia and the other-race-effect affect face perception in different ways. The broad range in performance for the cPs directs the focus of our future research towards looking for different forms of congenital prosopagnosia.}, web_url = {http://www.neuroschool-tuebingen-nena.de/}, event_name = {13th Conference of the Junior Neuroscientists of Tübingen (NeNA 2012): Science and Education as Social Transforming Agents}, event_place = {Schramberg, Germany}, state = {published}, author = {Esins J{esins}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}; Kim BR; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Conference{ NieuwenhuizenCB2012, title = {myCopter: Enabling Technologies for Personal Aerial Transportation Systems: A progress report}, year = {2012}, month = {11}, abstract = {The volume of both road and air transportation continues to increase despite many concerns regarding its financial and environmental impact. The European Union ‘Out of the Box’ study suggests a personal aerial transportation system (PATS) as an alternative means of transport for daily commuting. The aim of the myCopter project is to determine the social and technical aspects needed to set up such a transportation system based on personal aerial vehicles (PAVs). The project focuses on three research areas: the human-machine interface and training, automation technologies, and social acceptance. In the first phase of the project, requirements were defined for automation technologies in terms of sensors and test platforms. Additionally, desirable features for PAVs were investigated to support the design and evaluation of technologies for an effective human-machine interface. Furthermore, an overview of the social-technological environment provided insight into the challenges and issues that surround the realisation of a PATS and its integration into the current transportation system in Europe. The presentation will elaborate on the second phase of the myCopter project, in which initial designs for a human-machine interface and training are developed. These are evaluated experimentally with a focus on aiding non-expert pilots in closed-loop control scenarios. Additionally, first evaluations of novel automation technologies are performed in simulated environments and on flying test platforms. At the same time, technological issues are evaluated that contribute towards a reflexive design of PAV technologies based on criteria that are acceptable to the general public. The presentation will also focus on the next stages of the project, in which further experimental evaluations will be performed on technologies for human-machine interfaces, and where developed automation technologies will be fully tested on unmanned flying vehicles.
The expectations and perspectives of potential PAV users will be evaluated in group interviews in different European countries. Interesting technological and regulatory challenges need to be resolved for the development of a transportation system based on PAVs. The myCopter consortium combines the expertise from several research fields to tackle these challenges and to develop the technological and social aspects of a personal aerial transportation system.}, file_url = {fileadmin/user_upload/files/publications/2012/HELIWorld-2012-Nieuwenhuizen.pdf}, event_name = {4th International HELI World Conference at the International Aerospace Supply Fair AIRTEC 2012}, event_place = {Frankfurt a.M., Germany}, state = {published}, author = {Nieuwenhuizen F{fmnieuwenhuizen}{Department Human Perception, Cognition and Action}; Chuang L{chuang}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ Bulthoff2012_15, title = {Face Perception: Using a morphable face model to determine what makes a face look Asian or Caucasian and what makes a face attractive and why?}, year = {2012}, month = {10}, day = {24}, abstract = {For German observers, Koreans look far more similar to each other than Germans do and vice-versa. This phenomenon is referred to as the other-race effect (ORE). So far, this ORE has been described in tasks involving faces that differed not only in ethnicity but also in identity. In the first study that I will present, we dissociated ethnicity from identity information to create pairs of faces that share similar identity information but differ in ethnicity. For each face pair, participants reported which face looked more Asian or more Caucasian. We tested participants from Korea and Germany. Both groups of participants showed equal performance for same-race (high expertise) and other-race pairs (low expertise). Thus they showed no evidence of an other-race effect when ethnicity was the only varying factor between the faces to compare. Participants’ cultural background, however, affected their eye movement strategy. In our second study about ethnicity, mixed-race (Asian and Caucasian) faces were created by embedding one facial feature of one ethnicity (e.g. Caucasian mouth) in a face of the other ethnicity (e.g. Asian face). The influence of each exchanged facial feature on the ethnicity perception for the face it was embedded in was assessed in an ethnicity classification task. The results show that the eyes and the texture (skin) are major determinants of ethnicity classification for both Asian and Caucasian observers. In the last part of my presentation, I will talk more generally about what makes a face attractive and why.}, web_url = {http://cse.snu.ac.kr/en/node/5125}, event_name = {Seoul National University: Department of Computer Science and Engineering}, event_place = {Seoul, South Korea}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Article{ BiegBBC2012, title = {Looking for Discriminating Is Different from Looking for Looking's Sake}, journal = {PLoS ONE}, year = {2012}, month = {9}, volume = {7}, number = {9}, pages = {1-9}, abstract = {Recent studies provide evidence for task-specific influences on saccadic eye movements. For instance, saccades exhibit higher peak velocity when the task requires coordinating eye and hand movements.
The current study shows that the need to process task-relevant visual information at the saccade endpoint can be, in itself, sufficient to cause such effects. In this study, participants performed a visual discrimination task which required a saccade for successful completion. We compared the characteristics of these task-related saccades to those of classical target-elicited saccades, which required participants to fixate a visual target without performing a discrimination task. The results show that task-related saccades are faster and initiated earlier than target-elicited saccades. Differences between both saccade types are also noted in their saccade reaction time distributions and their main sequences, i.e., the relationship between saccade velocity, duration, and amplitude.}, web_url = {http://www.plosone.org/article/fetchObjectAttachment.action;jsessionid=409E420397B230BE376365245B458D2A?uri=info%3Adoi%2F10.1371%2Fjournal.pone.0045445&representation=PDF}, state = {published}, DOI = {10.1371/journal.pone.0045445}, EPUB = {e45445}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ BiegBBC2012_2, title = {Asymmetries in saccadic latencies during interrupted ocular pursuit}, journal = {Perception}, year = {2012}, month = {9}, volume = {41}, number = {ECVP Abstract Supplement}, pages = {137}, abstract = {Smooth pursuit eye movements can be interrupted and resumed at a later stage, eg, when a concurrent task requires visual sampling from elsewhere. Here we address whether and how interruptive saccades are affected by pursuit movements. Our participants pursued an object which moved horizontally in a sinusoidal pattern (frequency: 0.25 Hz, amplitude: 4 deg. visual angle). During this, discrimination targets appeared at 10 deg. eccentricity, to the left or right of the center. They were timed so that they appeared for 1 second while the pursuit object moved either toward or away from the discrimination target's position. Saccade reaction times were shorter when the discrimination targets appeared in a position that the tracking object was moving towards. Interestingly, saccade RTs back to the pursuit object were shorter when the object moved away from the discrimination target. We conclude that interruptions of pursuit movements lead to asymmetries in saccade generation. These asymmetries could have been caused by biases in attention along the predicted pursuit path.}, web_url = {http://pec.sagepub.com/content/41/1_suppl.toc}, event_name = {35th European Conference on Visual Perception}, event_place = {Alghero, Italy}, state = {published}, DOI = {10.1177/03010066120410S101}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ EsinsBKS2012, title = {Can a test battery reveal subgroups in congenital prosopagnosia?}, journal = {Perception}, year = {2012}, month = {9}, volume = {41}, number = {ECVP Abstract Supplement}, pages = {113}, abstract = {Congenital prosopagnosia, the innate impairment in recognizing faces, exhibits diverse deficits.
Due to this heterogeneity, the existence of subgroups of the impairment has been suggested (eg Kress and Daum, 2003 Behavioural Neurology 14 109-21). We examined 23 congenital prosopagnosics (cPAs) identified via a screening questionnaire (as used in Stollhoff, Jost, Elze, and Kennerknecht, 2011 PLoS ONE 6 e15702) and 23 age-, gender- and educationally matched controls with a battery consisting of nine different tests. These included well-known tests like the Cambridge Face Memory Test (CFMT, Duchaine and Nakayama, 2006 Neuropsychologia 44 576-85), a Famous Face Test (FFT), and new in-house tests of object and face recognition. As expected, cPAs had lower CFMT and FFT scores than the controls. Analyses of the performance patterns across the nine tests suggest the existence of subgroups within both cPAs and controls. These groups could not be revealed based on the CFMT and FFT scores alone, indicating the necessity of tests addressing different, specific aspects of object and face perception for the identification of subgroups. Current work focuses on characterizing the subgroups and identifying the most useful tests.}, web_url = {http://pec.sagepub.com/content/41/1_suppl.toc}, event_name = {35th European Conference on Visual Perception}, event_place = {Alghero, Italy}, state = {published}, DOI = {10.1177/03010066120410S101}, author = {Esins J{esins}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Kennerknecht I; Schultz J{johannes}{Department Human Perception, Cognition and Action}} } @Poster{ ChuangNB2012, title = {Eye-movement planning during flight maneuvers}, journal = {Perception}, year = {2012}, month = {9}, volume = {41}, number = {ECVP Abstract Supplement}, pages = {99}, abstract = {How are eye-movements planned to access relevant visual information during flight control? From the cockpit perspective, there are two classes of visual information that are relevant for flight control. First, the changing visuals of the external world provide direct perceptual feedback on how the pilot's command of the control stick is affecting the aircraft's current position, orientation and velocity. Second, flight instruments provide abstracted and specific values—on factors such as the aircraft's compass bearing and vertical speed—that have to be continuously monitored, in order for the global objective of certain maneuvers (eg, turns) to be achieved. Trained pilots have to coordinate their eye-movements across this structured visual workspace (ie, outside view and instruments) to access timely and task-relevant information. The current work focuses on providing descriptions of these planned eye-movements. Eye-movements of pilots were recorded in a high-fidelity flight simulator (100° field-of-view) whilst they performed specific flight maneuvers. Fixation durations and transitions between the individual instruments and aspects of the external environment are represented as network graphs.
This allowed us to formally describe the sources of information that were relied on across the different tasks and to compare actual performance to expert predictions.}, web_url = {http://pec.sagepub.com/content/41/1_suppl.toc}, event_name = {35th European Conference on Visual Perception}, event_place = {Alghero, Italy}, state = {published}, DOI = {10.1177/03010066120410S101}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; Nieuwenhuizen F{fmnieuwenhuizen}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ KaulardSWBd2012, title = {Inverting natural facial expressions puzzles you}, journal = {Perception}, year = {2012}, month = {9}, volume = {41}, number = {ECVP Abstract Supplement}, pages = {103}, abstract = {The face inversion effect has often been demonstrated in face identification tasks. Less is known about whether processes underlying face expression recognition are also sensitive to face inversion. Face expression recognition is usually investigated using pictures of six emotional expressions. In everyday life, humans are however exposed to a much larger set of facial expressions, which are dynamic. Here, we examine the effect of face inversion on expression recognition for a variety of facial expressions displayed statically and dynamically. We measured participants' recognition accuracy for 12 expressions using a 13-alternative forced-choice task. We varied the dynamics (videos versus pictures) and the orientation (upright versus inverted) of the presentation of the expressions in a completely crossed design. Accuracy was significantly higher when expressions were presented as videos (62%) than as pictures (47%). Similarly, recognition accuracy was significantly higher for upright (84%) compared to inverted (64%) expressions. Moreover, the effect of orientation changed significantly with expression type. No other effects were significant. This is the first study to report that face inversion affects the recognition of natural facial expressions.
Because face inversion effects are interpreted as a sign of configural processing, our results suggest configural processing for a majority of facial expressions.}, web_url = {http://pec.sagepub.com/content/41/1_suppl.toc}, event_name = {35th European Conference on Visual Perception}, event_place = {Alghero, Italy}, state = {published}, DOI = {10.1177/03010066120410S101}, author = {Kaulard K{kascot}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}} } @Poster{ ChuangNB2012_2, title = {Investigating Gaze Behavior of Novice Pilots during Basic Flight Maneuvers}, year = {2012}, month = {9}, web_url = {http://research.fit.edu/hci-aero/hci-aero2012/Poster_Sessions.html}, event_name = {International Conference on Human-Computer Interaction in Aerospace (HCI-Aero 2012)}, event_place = {Bruxelles, Belgium}, state = {published}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Nieuwenhuizen FM{fmnieuwenhuizen}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ SchultzFdBK2012, title = {How are facial expressions represented in the human brain?}, journal = {Perception}, year = {2012}, month = {9}, volume = {41}, number = {ECVP Abstract Supplement}, pages = {38}, abstract = {The dynamic facial expressions that we encounter every day can carry a myriad of social signals. What are the neural mechanisms allowing us to decode these signals? A useful basis for this decoding could be representations in which the facial expressions are set in relation to each other. Here, we compared the behavioral and neural representations of 12 facial expressions presented as pictures and videos. Behavioral representations of these expressions were computed based on the results of a semantic differential task. Neural representations of these expressions were obtained by multivariate pattern analysis of functional magnetic resonance imaging data. The two kinds of representations were compared using correlations. For expression videos, the results show a significant correlation between the behavioral and neural representations in the superior temporal sulcus (STS), the fusiform face area, the occipital face area and the amygdala, all in the left hemisphere. For expression pictures, a significant correlation was found only in the left STS. These results suggest that of all tested regions, the left STS contains the neural representation of facial expressions that is closest to their behavioral representation.
This confirms the predominant role of STS in coding changeable aspects of faces, which include expressions.}, web_url = {http://pec.sagepub.com/content/41/1_suppl.toc}, event_name = {35th European Conference on Visual Perception}, event_place = {Alghero, Italy}, state = {published}, DOI = {10.1177/03010066120410S101}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; Fernandez Cruz AL{anafer}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Kaulard K{kascot}{Department Human Perception, Cognition and Action}} } @Article{ GaissertWFB2012, title = {Haptic Categorical Perception of Shape}, journal = {PLoS One}, year = {2012}, month = {8}, volume = {7}, number = {8}, pages = {1-7}, abstract = {Categorization and categorical perception have been extensively studied, mainly in vision and audition. In the haptic domain, our ability to categorize objects has also been demonstrated in earlier studies. Here we show for the first time that categorical perception also occurs in haptic shape perception. We generated a continuum of complex shapes by morphing between two volumetric objects. Using similarity ratings and multidimensional scaling we ensured that participants could haptically discriminate all objects equally. Next, we performed classification and discrimination tasks. After a short training with the two shape categories, both tasks revealed categorical perception effects. Training leads to between-category expansion resulting in higher discriminability of physical differences between pairs of stimuli straddling the category boundary. Thus, even brief training can alter haptic representations of shape. This suggests that the weights attached to various haptic shape features can be changed dynamically in response to top-down information about class membership.}, web_url = {http://www.plosone.org/article/info%3Adoi%2F10.1371%2Fjournal.pone.0043062}, state = {published}, DOI = {10.1371/journal.pone.0043062}, EPUB = {e43062}, author = {Gaissert N{ninagaissert}{Department Human Perception, Cognition and Action}; Waterkamp S{swaterka}{Department Human Perception, Cognition and Action}; Fleming RW{roland}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Inproceedings{ BiegCFRB2012, title = {Einfluss von Ablenkung und Augenbewegungen auf Steuerungsaufgaben}, year = {2012}, month = {8}, pages = {341-344}, abstract = {The present study investigated the influence of visual distraction on manual control tasks. The results indicate that even a brief shift of attention and gaze is accompanied by a systematic effect on the control task. Conversely, the concurrently performed control task also systematically affects eye movements. Taking such interference into account can be useful for the development of graphical on-board information systems for cars and aircraft.}, web_url = {http://dl.mensch-und-computer.de/handle/123456789/2907}, editor = {Reiterer, H., O. Deussen}, publisher = {Oldenbourg}, address = {München, Germany}, booktitle = {Mensch & Computer 2012: 12.
fachübergreifende Konferenz für interaktive und kooperative Medien}, event_name = {Mensch & Computer (M&C)}, event_place = {Konstanz, Germany}, state = {published}, ISBN = {978-3-486-71879-9}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ DobsBCS2012, title = {Investigating factors influencing the perception of identity from facial motion}, journal = {Journal of Vision}, year = {2012}, month = {8}, volume = {12}, number = {9}, pages = {35}, abstract = {Previous research has shown that facial motion can convey information about identity in addition to facial form (e.g. Hill & Johnston, 2001). The present study aims to determine whether identity judgments vary depending on the kinds of facial movements and the task performed. To this end, we used a recent facial motion capture and animation system (Curio et al., 2006). We recorded different actors performing classic emotional facial movements (e.g. happy, sad) and non-emotional facial movements occurring in social interactions (e.g. greetings, farewell). Only non-rigid components of these facial movements were used to animate a single avatar head. In a between-subject design, four groups of participants performed identity judgments based on emotional or social facial movements in a same-different (SD) or a delayed matching-to-sample task (XAB). In the SD task, participants watched two distinct facial movements (e.g. happy and sad) and had to choose whether the same or different actors performed these facial movements. In the XAB task, participants saw one target facial movement X (e.g. happy) performed by one actor followed by two facial movements of another kind (e.g. sad) performed by two actors. Participants chose which of the latter facial movements was performed by the same actor as the one performing X. Prior to the experiment, participants were familiarized with the actors by watching them perform facial movements not subsequently tested. Participants were able to judge actor identities correctly in all conditions, except for the SD task performed on the emotional stimuli. Sensitivity to identity as measured by d-prime was higher in the XAB than in the SD task. Furthermore, performance was higher for social than for emotional stimuli. Our findings reveal an effect of task on identity judgments based on facial motion, and suggest that such judgments are easier when facial movements are less stereotypical.}, web_url = {http://www.journalofvision.org/content/12/9/35.abstract}, event_name = {12th Annual Meeting of the Vision Sciences Society (VSS 2012)}, event_place = {Naples, FL, USA}, state = {published}, DOI = {10.1167/12.9.35}, author = {Dobs K{kdobs}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Curio C{curio}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}} } @Poster{ Bulthoff2012_5, title = {What gives a face its ethnicity?}, journal = {Journal of Vision}, year = {2012}, month = {8}, volume = {12}, number = {9}, pages = {1282}, abstract = {We can quickly and easily judge faces in terms of their ethnicity. What is the basis for our decision?
Other studies have used either eye tracking (e.g., Armann & Bülthoff 2009) or the Bubbles method (e.g., Gosselin & Schyns 2001) in categorization tasks to investigate which facial features are used for sex or identity classification. The first method investigates which parts are preferentially looked at, while the latter method shows which facial regions, when shown in isolation during the task, lead to correct classification. Here we measured the influence of facial features on ethnicity classification when they were embedded in the face of the other ethnicity. Asian and Caucasian faces of our 3D face database (http://faces.kyb.tuebingen.mpg.de) had been paired according to sex, age and appearance. We used 18 pairs of those Asian-Caucasian faces to create a variety of mixed-race faces. Mixed-race faces were obtained by exchanging one of the following facial features between both faces of a pair: mouth, nose, facial contour, shape, texture (skin) and eyes. We showed original and modified faces one by one in a simple ethnicity classification task. All faces were turned 20 degrees to the side for a more informative view of nose shape, face shape and facial contour, while the eyes, mouth and general face texture were still fully visible. Because of skin color differences between exchanged parts and original faces, all 3D faces were rendered as grey-level images. The results of 24 Caucasian participants show that the eyes and the texture of a face are major determinants for ethnicity classification, more than face shape and face contour, while mouth and nose had only a weak influence. Response times showed that participants were faster at classifying less ambiguous faces.}, web_url = {http://www.journalofvision.org/content/12/9/1282.abstract}, event_name = {12th Annual Meeting of the Vision Sciences Society (VSS 2012)}, event_place = {Naples, FL, USA}, state = {published}, DOI = {10.1167/12.9.1282}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Conference{ KimESBW2012, title = {Mapping the other-race-effect in face recognition using a three-experiment test battery}, journal = {i-Perception}, year = {2012}, month = {7}, day = {15}, volume = {3}, number = {9}, pages = {711}, abstract = {The fact that people are better at recognizing faces of their own race than others is called the other-race-effect (ORE). Most studies use only a single test to map and determine the characteristics of the ORE, however. Here, we investigated how two groups of fifteen age-matched Korean and German participants recognize Asian and Caucasian faces with three experiments as part of testing a new battery for characterizing face-processing performance. Participants first underwent the standard Cambridge face memory test in which they had to learn Caucasian target faces at varying noise levels which then were to be recognized in a forced-choice task. In this task, German participants performed significantly better than Koreans (83% versus 72%). The second experiment used a standard old-new recognition task with 20 Caucasian and 20 Asian faces (courtesy of the tarrlab@CMU). Here, Koreans were better with Asian faces (d’-difference=1.23) whereas Germans only showed a tendency towards an ORE (d’-difference=0.44). In the third experiment, participants had to rate the similarity of Caucasian face pairs which varied parametrically along featural and configural dimensions using the morphable faces from the MPI face-database.
Here, we found that Korean participants were significantly less sensitive to featural changes than German participants. In conclusion, we were able to demonstrate an ORE for most of our experimental conditions. Interestingly, data from the third experiment suggest that the ORE may be due more to reduced sensitivity to featural than to configural processing for other-race faces. Future studies will extend this new test battery to prosopagnosics. Acknowledgement: This research was supported by the World Class University (WCU) program through the National Research Foundation of Korea funded by the Ministry of Education, Science, and Technology (R31-1008-000-10008-0).}, web_url = {http://i-perception.perceptionweb.com/journal/I/article/if711}, event_name = {8th Asia-Pacific Conference on Vision (APCV 2012)}, event_place = {Incheon, South Korea}, state = {published}, DOI = {10.1068/if711}, author = {Kim BR; Esins J{esins}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}} } @Article{ Bulthoff2012_16, title = {Review: L'empreinte Des Sens}, journal = {Perception}, year = {2012}, month = {7}, volume = {41}, number = {7}, pages = {881-882}, web_url = {http://pec.sagepub.com/content/41/7/881}, state = {published}, DOI = {10.1068/p4107rvw}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ EsinsSKWB2012, title = {Comparing the other-race-effect and congenital Prosopagnosia using a three-experiment test battery}, journal = {i-Perception}, year = {2012}, month = {7}, volume = {3}, number = {9}, pages = {688}, abstract = {Congenital prosopagnosia, an innate impairment in recognizing faces, as well as the other-race-effect, the disadvantage in recognizing faces of foreign races, both influence face recognition abilities. Here we compared both phenomena by testing three groups: German congenital prosopagnosics (cPs), unimpaired German and unimpaired South Korean participants (n=23 per group), on three tests with Caucasian faces. First we ran the Cambridge Face Memory Test (Duchaine & Nakayama, 2006 Neuropsychologia 44 576-585). Participants had to recognize Caucasian target faces in a 3AFC task. German controls performed better than Koreans (p=0.009), who performed better than prosopagnosics (p=0.0001). Variation of the individual performances was larger for cPs than for Koreans (p = 0.028). In the second experiment, participants rated the similarity of Caucasian faces (in-house 3D face-database) which differed parametrically in features or second order relations (configuration). We found differences between sensitivities to change type (featural or configural, p=0) and between groups (p=0.005) and an interaction between both factors (p = 0.019). During the third experiment, participants had to learn exemplars of artificial objects (greebles), natural objects (shells), and faces and recognize them among distractors. The results showed an interaction (p = 0.005) between stimulus type and participant group: cPs were better for non-face stimuli and worse for face stimuli than the other groups. Our results suggest that congenital prosopagnosia and the other-race-effect affect face perception in different ways.
The broad range in performance for the cPs directs the focus of our future research towards looking for different forms of congenital prosopagnosia.}, file_url = {fileadmin/user_upload/files/publications/2012/APCV-2012-Poster-Esins.pdf}, web_url = {http://i-perception.perceptionweb.com/journal/I/volume/3/article/if688}, event_name = {8th Asia-Pacific Conference on Vision (APCV 2012)}, event_place = {Incheon, South Korea}, state = {published}, DOI = {10.1068/if688}, author = {Esins J{esins}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}; Kim BR; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ JungAB2012, title = {What gives a face its race?}, journal = {i-Perception}, year = {2012}, month = {7}, volume = {3}, number = {9}, pages = {697}, abstract = {What gives a face its race? By biological criteria, human “races” do not exist (e.g., Cosmides et al., 2003). Nevertheless, everyday life and research from various fields show that we robustly and reliably perceive humans as belonging to different race groups. Here, we investigate the bases for our quick and easy judgments, by measuring the influence of manipulated facial features on race classification. Asian and Caucasian faces of our 3-dimensional face database (http://faces.kyb.tuebingen.mpg.de) were paired according to sex, age and overall appearance. With these Asian-Caucasian face pairs we created a variety of mixed-race faces, by exchanging facial features between both faces of a pair: eyes, nose, mouth, “outer” features, shape or texture. Original and modified faces were shown in a simple race classification task. We tested 24 Westerners (Germany) and 24 Easterners (South Korea). In both groups, eyes and texture were major determinants for race classification, followed by face shape, and then outer features, mouth and nose, which only had a weak influence on perceived race. Eastern participants classified Caucasian original faces better than Asian original faces, while Western participants were similarly good at classifying both races. Western participants - but not their Eastern counterparts - were less susceptible to eye, shape and texture manipulations in other-race faces than in their own-race faces. A closer look at the data suggests that this effect mainly originates from differences in processing male and female faces in Western participants only.
Our results provide more evidence of differences between observers from different cultural and ethnic backgrounds in face perception and processing.}, file_url = {fileadmin/user_upload/files/publications/2012/APCV-2012-Jung.pdf}, web_url = {http://i-perception.perceptionweb.com/journal/I/volume/3/article/if697}, event_name = {8th Asia-Pacific Conference on Vision (APCV 2012)}, event_place = {Incheon, South Korea}, state = {published}, DOI = {10.1068/if697}, author = {Jung W; Armann R{armann}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Conference{ Bulthoff2012_13, title = {Faces and beauty}, year = {2012}, month = {6}, day = {7}, event_name = {10th Anniversary Year of Cognitec Systems GmbH}, event_place = {Radebeul, Germany}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Article{ ArmannB2011, title = {Male and female faces are only perceived categorically when linked to familiar identities – And when in doubt, he is a male}, journal = {Vision Research}, year = {2012}, month = {6}, volume = {63}, pages = {69–80}, abstract = {Categorical perception (CP) is a fundamental cognitive process that enables us to sort similar objects in the world into meaningful categories with clear boundaries between them. CP has been found for high-level stimuli like human faces, more precisely, for the perception of face identity, expression and ethnicity. For sex, however, which represents another important and biologically relevant dimension of human faces, results have been equivocal so far. Here, we reinvestigate CP for sex using newly created face stimuli to control two factors that, in our opinion, might have influenced the results in earlier studies. Our new stimuli are (a) derived from single face identities, so that changes of sex are not confounded with changes of identity information, and (b) “normalized” in their degree of maleness and femaleness, to counteract natural variations of perceived masculinity and femininity of faces that might obstruct evidence of categorical perception. Despite careful normalization, we did not find evidence of CP for sex using classical test procedures, unless participants were specifically familiarized with the face identities before testing. These results support the single-route hypothesis, stating that sex and identity information in faces are not processed in parallel, in contrast to what was suggested in the classical Bruce and Young model of face perception. Interestingly, our participants also show a consistent bias, before and after perceptual normalization of the male–female range of the test morph continua, to judge faces as male rather than female.}, web_url = {http://www.sciencedirect.com/science/article/pii/S0042698912001496}, state = {published}, DOI = {10.1016/j.visres.2012.05.005}, author = {Armann R{armann}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Article{ ChuangVB2012_2, title = {Learned non-rigid object motion is a view-invariant cue to recognizing novel objects}, journal = {Frontiers in Computational Neuroscience}, year = {2012}, month = {5}, volume = {6}, number = {26}, pages = {1-8}, abstract = {There is evidence that observers use learned object motion to recognize objects. For instance, studies have shown that reversing the learned direction in which a rigid object rotated in depth impaired recognition accuracy.
This motion reversal can be achieved by playing animation sequences of moving objects in reverse frame order. In the current study, we used this sequence-reversal manipulation to investigate whether observers encode the motion of dynamic objects in visual memory, and whether such dynamic representations are encoded in a way that is dependent on the viewing conditions. Participants first learned dynamic novel objects, presented as animation sequences. Following learning, they were tested on their ability to recognize these learned objects when their animation sequence was shown in the same sequence order as during learning or in the reverse sequence order. In Experiment 1, we found that non-rigid motion contributed to recognition performance; that is, sequence-reversal decreased sensitivity across different tasks. In subsequent experiments, we tested the recognition of non-rigidly deforming (Experiment 2) and rigidly rotating (Experiment 3) objects across novel viewpoints. Recognition performance was affected by viewpoint changes in both experiments. Learned non-rigid motion continued to contribute to recognition performance and this benefit was the same across all viewpoint changes. By comparison, learned rigid motion did not contribute to recognition performance. These results suggest that non-rigid motion provides a source of information for recognizing dynamic objects, which is not affected by changes to viewpoint.}, web_url = {http://www.frontiersin.org/Journal/DownloadFile.ashx?pdf=1&FileId=%2062343&articleId=%2022441&Version=%201&ContentTypeId=21&FileName=%20fncom-06-00026.pdf}, state = {published}, DOI = {10.3389/fncom.2012.00026}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ PerdikisMKWL2012, title = {EEG brain dynamics during processing of static and dynamic facial emotional expression}, year = {2012}, month = {5}, abstract = {Humans recognize facial emotional expressions (FEEs) better when FEEs are presented dynamically than through static images. Wallraven et al. 2008 propose that humans are sensitive to the natural dynamics of FEEs. Moreover, PET/fMRI studies suggest that differentiated brain networks process static and dynamic FEEs. However, in most cases, dynamic FEEs have been created out of static ones, using linear morphing techniques. Combined with the low time resolution of PET/fMRI, this means that such studies fail to capture the modulation of the activated brain networks by the subtle (and highly nonlinear) dynamics of FEEs. Our ongoing study investigates EEG responses to static and dynamic FEEs drawn from an ecologically valid database (Kaulard et al. 2008, Kaulard et al. 2009). “Happy” and “angry” FEEs performed by two male and two female actors are displayed to twenty female participants in an “oddball” experimental paradigm. Blocks of either dynamic or static stimuli that differ in their emotional content (“happy” versus “angry” and the reverse) are presented in a pseudorandom order. The task consists of pressing a keyboard button upon appearance of a deviant stimulus. Data analysis focuses on synchrony and nonlinear coupling of sensor as well as source dynamics (as a bridge to PET/fMRI studies), both in the time-frequency and in the phase-space domain, to identify the brain networks that emerge and evolve dynamically in each condition.
Preliminary results from pilot data analysis confirm the PET/fMRI findings of enhanced and differentiated brain activations for dynamic FEEs compared to static ones.}, web_url = {http://escan2012.sciencesconf.org/4021}, event_name = {1st Conference of the European Society for Cognitive and Affective Neuroscience (ESCAN 2012)}, event_place = {Marseille, France}, state = {published}, author = {Perdikis D; M\"uller V; Kaulard K{kascot}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; Lindenberg U} } @Article{ HelbigERPTMSN2011, title = {The neural mechanisms of reliability weighted integration of shape information from vision and touch}, journal = {NeuroImage}, year = {2012}, month = {4}, volume = {60}, number = {2}, pages = {1063–1072}, abstract = {Behaviourally, humans have been shown to integrate multisensory information in a statistically-optimal fashion by averaging the individual unisensory estimates according to their relative reliabilities. This form of integration is optimal in that it yields the most reliable (i.e. least variable) multisensory percept. The present study investigates the neural mechanisms underlying integration of visual and tactile shape information at the macroscopic scale of the regional BOLD response. Observers discriminated the shapes of ellipses that were presented bimodally (visual-tactile) or visually alone. A 2×5 factorial design manipulated (i) the presence vs. absence of tactile shape information and (ii) the reliability of the visual shape information (five levels). We then investigated whether regional activations underlying tactile shape discrimination depended on the reliability of visual shape. Indeed, in primary somatosensory cortices (bilateral BA2) and the superior parietal lobe the responses to tactile shape input were increased when the reliability of visual shape information was reduced. Conversely, tactile inputs suppressed visual activations in the right posterior fusiform, when the visual signal was blurred and unreliable. Somatosensory and visual cortices may sustain integration of visual and tactile shape information either via direct connections from visual areas or top-down effects from higher order parietal areas.}, web_url = {http://www.sciencedirect.com/science/article/pii/S1053811911011475}, state = {published}, DOI = {10.1016/j.neuroimage.2011.09.072}, author = {Helbig HB{helbig}{Research Group Multisensory Perception and Action}; Ernst MO{marc}{Research Group Multisensory Perception and Action}; Ricciardi E{ricciardi}; Pietrini P; Thielscher A{thielscher}{Department High-Field Magnetic Resonance}; Mayer KM{kama}{Research Group Multisensory Perception and Action}; Schultz J{johannes}; Noppeney U{unoppe}{Research Group Cognitive Neuroimaging}} } @Article{ KaulardCBW2012, title = {The MPI Facial Expression Database: A Validated Database of Emotional and Conversational Facial Expressions}, journal = {PLoS One}, year = {2012}, month = {3}, volume = {7}, number = {3}, pages = {1-18}, abstract = {The ability to communicate is one of the core aspects of human life. For this, we use not only verbal but also nonverbal signals of remarkable complexity. Among the latter, facial expressions belong to the most important information channels. Despite the large variety of facial expressions we use in daily life, research on facial expressions has so far mostly focused on the emotional aspect. 
Consequently, most databases of facial expressions available to the research community also include only emotional expressions, neglecting the largely unexplored aspect of conversational expressions. To fill this gap, we present the MPI facial expression database, which contains a large variety of natural emotional and conversational expressions. The database contains 55 different facial expressions performed by 19 German participants. Expressions were elicited with the help of a method-acting protocol, which guarantees both well-defined and natural facial expressions. The method-acting protocol was based on every-day scenarios, which are used to define the necessary context information for each expression. All facial expressions are available in three repetitions, in two intensities, as well as from three different camera angles. A detailed frame annotation is provided, from which a dynamic and a static version of the database have been created. In addition to describing the database in detail, we also present the results of an experiment with two conditions that serve to validate the context scenarios as well as the naturalness and recognizability of the video sequences. Our results provide clear evidence that conversational expressions can be recognized surprisingly well from visual information alone. The MPI facial expression database will enable researchers from different research fields (including the perceptual and cognitive sciences, but also affective computing, as well as computer vision) to investigate the processing of a wider range of natural facial expressions.}, web_url = {http://www.plosone.org/article/info%3Adoi%2F10.1371%2Fjournal.pone.0032321}, state = {published}, DOI = {10.1371/journal.pone.0032321}, EPUB = {e32321}, author = {Kaulard K{kascot}{Department Human Perception, Cognition and Action}; Cunningham DW{dwc}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}} } @Conference{ Bulthoff2012_14, title = {Was macht ein Gesicht hübsch?}, year = {2012}, month = {2}, day = {9}, web_url = {http://neuroschool-tuebingen-schuelerlabor.de/fileadmin/user_upload/Dokumente/lab/Lehrerfortbildung_2012.pdf}, event_name = {Lehrerfortbildung des Schülerlabors Neurowissenschaften: Ästhetische Empfindungen, Emotionen und neuronale Aktivität}, event_place = {Tübingen, Germany}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Article{ GaissertW2011_2, title = {Categorizing natural objects: a comparison of the visual and the haptic modalities}, journal = {Experimental Brain Research}, year = {2012}, month = {1}, volume = {216}, number = {1}, pages = {123-134}, abstract = {Although the hands are the most important tool for humans to manipulate objects, only little is known about haptic processing of natural objects. Here, we selected a unique set of natural objects, namely seashells, which vary along a variety of object features, while others are shared across all stimuli. To correctly interact with objects, they have to be identified or categorized. For both processes, measuring similarities between objects is crucial. Our goal is to better understand the haptic similarity percept by comparing it to the visual similarity percept. First, direct similarity measures were analyzed using multidimensional scaling techniques to visualize the perceptual spaces of both modalities. 
We find that the visual and the haptic modality form almost identical perceptual spaces. Next, we performed three different categorization tasks. All tasks reveal highly accurate processing of complex shapes by the haptic modality. Moreover, we find that objects grouped into the same category form regions within the perceptual space. Hence, in both modalities, perceived similarity constitutes the basis for categorizing objects. Moreover, both modalities focus on shape to form categories. Taken together, our results lead to the assumption that the same cognitive processes link haptic and visual similarity perception and the resulting categorization behavior.}, web_url = {http://www.springerlink.com/content/h8174v8813827266/fulltext.pdf}, state = {published}, DOI = {10.1007/s00221-011-2916-4}, author = {Gaissert N{ninagaissert}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}} } @Article{ DopjansBW2012, title = {Serial exploration of faces: Comparing vision and touch}, journal = {Journal of Vision}, year = {2012}, month = {1}, volume = {12}, number = {1:6}, pages = {1-14}, abstract = {Even though we can recognize faces by touch surprisingly well, haptic face recognition performance is still worse than for visual exploration. One possible explanation for this performance difference is the use of different encoding strategies in the two modalities, namely holistic encoding in vision versus serial encoding in haptics. Here, we tested this hypothesis by promoting serial encoding in vision, using a novel, gaze-restricted display that limited the effective field of view in vision to resemble that of haptic exploration. First, we compared haptic with gaze-restricted and unrestricted visual face recognition. Second, we used the face inversion paradigm to assess how encoding differences might affect processing strategies (featural vs. holistic). By promoting serial encoding in vision, we found equal face recognition performance in vision and haptics with a clear switch from holistic to featural processing, suggesting that performance differences in visual and haptic face recognition are due to modality-specific encoding strategies.}, web_url = {http://www.journalofvision.org/content/12/1/6.full.pdf+html}, state = {published}, DOI = {10.1167/12.1.6}, author = {Dopjans L{ldopjans}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}} } @Conference{ Chuang2011_4, title = {Perception of the active observer}, year = {2011}, month = {11}, day = {2}, abstract = {As active observers, we move our eyes, re-orient our bodies and even manipulate our environment to access task-relevant information. The purpose of this talk is to demonstrate that our understanding of human behavior can be enriched by considering that the observer is oftentimes responsible for his own perceptual input. I will do so by first presenting research that a) addressed how object speeds are estimated during locomotion and b) investigated how we explore objects during learning for subsequent recognition.
Following this, I will present research in two application scenarios that exemplify the role of the active observer, namely the teleoperation of swarm-UAVs and gaze-tracking on wall-sized displays.}, web_url = {http://ikw.uni-osnabrueck.de/en/node/680}, event_name = {Institute of Cognitive Science, Universität Osnabrück}, event_place = {Osnabrück, Germany}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Article{ ArmannJCR2011, title = {Race-specific norms for coding face identity and a functional role for norms}, journal = {Journal of Vision}, year = {2011}, month = {11}, volume = {11}, number = {13:9}, pages = {1-14}, abstract = {Models of face perception often adopt a framework in which faces are represented as points or vectors in a multidimensional space, relative to the average face that serves as a norm for encoding. Since faces are very similar in their configuration and share many visual properties, they could be encoded in one common space against one norm. However, certain face properties may result in grouping and “subclassification” of similar faces. We studied the processing of faces of different races, using high-level aftereffects, where exposure to one face systematically distorts the perception of a subsequently viewed face toward the “opposite” identity in face space. We measured identity aftereffects for adapt–test pairs that were opposite relative to race-specific (Asian and Caucasian) averages and pairs that were opposite relative to a “generic” average (both races morphed together). Aftereffects were larger for race-specific compared to mixed-race adapt–test pairs. These results suggest that race-specific norms are used to code identity because aftereffects are generally larger for adapt–test pairs drawn from trajectories passing through the norm (opposite pairs) than for those that do not. We also found that identification thresholds were lower when targets were distributed around race-specific averages than around the mixed-race average, suggesting that norm-based face encoding may play a functional role in facilitating identity discrimination.}, web_url = {http://www.journalofvision.org/content/11/13/9.full.pdf+html}, state = {published}, DOI = {10.1167/11.13.9}, author = {Armann R{armann}{Department Human Perception, Cognition and Action}; Jeffery L; Calder AJ; Rhodes G} } @Poster{ PapeWSBM2011, title = {Grid cell remapping in humans}, year = {2011}, month = {11}, volume = {41}, number = {288.05}, abstract = {Grid cells in entorhinal cortex of freely moving rodents were proposed to provide a universal metric of space. They tile the environment into a six-fold symmetric pattern with a particular orientation relative to the environment. The six-fold rotational symmetry of grid patterns can be used to predict a macroscopic signal in functional magnetic resonance imaging (fMRI) in humans [Doeller et al, 2010, Nature]. During hippocampal remapping, grid pattern orientations in rats also change. The purpose of the present study is to examine whether orientation changes (i.e., remapping) can also be found in humans. Participants learned object locations within a virtual room (see Figure 1 left side) and retrieved locations from different start locations during two scanning sessions. They then navigated into an adjacent room and repeated the procedure.
We extracted grid orientations from odd trials, and predicted the BOLD response in even trials as a function of the deviation between running direction and the estimated grid orientation for each session. This prediction was significant for the right entorhinal cortex, replicating earlier findings. In 80% of the cases, grid cell orientations significantly differed between sessions both within a room and between rooms (see Figure 1 right side). Switching off the virtual environment between sessions for about one minute was seemingly sufficient to trigger this change. For male, but not for female participants, grid cell orientation was clustered around the random view of the room experienced at session start. The data suggest that human grid cell orientations can be rather flexible, which might be due to the virtuality of the experience. Grid cell orientation might, at least for male participants, be related to the initial view of an environment.}, web_url = {http://www.sfn.org/am2011/}, event_name = {41st Annual Meeting of the Society for Neuroscience (Neuroscience 2011)}, event_place = {Washington, DC, USA}, state = {published}, author = {Pape A-A{antopia}{Department Human Perception, Cognition and Action}; Wolbers T; Schultz J{johannes}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Meilinger T{meilinger}{Department Human Perception, Cognition and Action}} } @Conference{ GaissertWvBW2011, title = {Efficient cross-modal transfer of shape information in visual and haptic object categorization}, journal = {i-Perception}, year = {2011}, month = {10}, day = {18}, volume = {2}, number = {8}, pages = {822}, abstract = {Categorization has traditionally been studied in the visual domain, with only a few studies focusing on the abilities of the haptic system in object categorization. During the first years of development, however, touch and vision are closely coupled in the exploratory procedures used by the infant to gather information about objects. Here, we investigate how well shape information can be transferred between those two modalities in a categorization task. Our stimuli consisted of amoeba-like objects that were parametrically morphed in well-defined steps. Participants explored the objects in a categorization task either visually or haptically. Interestingly, both modalities led to similar categorization behavior, suggesting that similar shape processing might occur in vision and haptics. Next, participants received training on specific categories in one of the two modalities. As would be expected, training increased performance in the trained modality; however, we also found significant transfer of training to the other, untrained modality after only relatively few training trials.
Taken together, our results demonstrate that complex shape information can be transferred efficiently across the two modalities, which speaks in favor of multisensory, higher-level representations of shape.}, web_url = {http://i-perception.perceptionweb.com/journal/I/volume/2/article/ic822}, event_name = {12th International Multisensory Research Forum (IMRF 2011)}, event_place = {Fukuoka, Japan}, state = {published}, DOI = {10.1068/ic822}, author = {Gaissert N{ninagaissert}{Department Human Perception, Cognition and Action}; Waterkamp S{swaterka}{Department Human Perception, Cognition and Action}; van Dam L{vandam}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}} } @Inproceedings{ SonCKB2011, title = {Haptic Feedback Cues Can Improve Human Perceptual Awareness in Multi-Robots Teleoperation}, year = {2011}, month = {10}, pages = {1323-1328}, abstract = {The availability of additional force cues in haptic devices is often expected to improve control performance over conditions that only provide visual feedback. However, there is little empirical evidence to show this to be true for the teleoperation control of remote vehicles (i.e., multiple unmanned aerial vehicles (UAVs)). In this paper, we show that force cues can increase one's sensitivity in discerning the presence of obstacles in the remote multi-UAVs' environment. Significant benefits, relative to a purely visual scenario, were achieved only when force cues were sufficiently amplified by large gains. In addition, force cues tended to provide stronger benefits when they were based on the UAVs' velocity information.}, web_url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6106130}, publisher = {IEEE}, address = {Piscataway, NJ, USA}, event_name = {11th International Conference on Control, Automation and Systems (ICCAS 2011)}, event_place = {Gyeonggi-do, Korea}, state = {published}, ISBN = {978-1-4577-0835-0}, author = {Son HI{chakurt}{Department Human Perception, Cognition and Action}; Chuang L{chuang}{Department Human Perception, Cognition and Action}; Kim J{junsukkim}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ LeeBBC2011, title = {Fast Fitting on a Saccadic Eye Movement Model for Decision Making}, year = {2011}, month = {10}, volume = {12}, pages = {33}, abstract = {How does our visual system decide where to look? The Linear Approach to Threshold with Ergodic Rate (LATER: Carpenter, 1995) is a simple decision-making model for saccadic eye movements. Currently, experimental data suggest that saccadic eye movements can be discriminated according to whether they are performed for directed fixations or for item recognition (Montagnini & Chelazzi, 2005; Bieg et al., submitted). Unfortunately, sufficient goodness-of-fit can only be acquired with large datasets, for each individual participant. Here, we investigate whether adapting LATER with modern computational methods can allow for saccades to be classified for their functionality, with minimal data and in real time.
In doing so, we strive towards the eventual goal of using the LATER model for predicting observer intentions in real-world applications.}, event_name = {12th Conference of Junior Neuroscientists of Tübingen (NeNA 2011)}, event_place = {Heiligkreuztal, Germany}, state = {published}, author = {Lee JJ{jlee}{Department Human Perception, Cognition and Action}; Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ PapeWSBM2011_2, title = {Grid cell remapping in humans}, year = {2011}, month = {10}, volume = {12}, pages = {38}, abstract = {Grid cells in entorhinal cortex of freely moving rodents were proposed to provide a universal metric of space. They tile the environment into a six-fold symmetric pattern with a particular orientation relative to the environment. The six-fold rotational symmetry of grid patterns can be used to predict a macroscopic signal in functional magnetic resonance imaging (fMRI) in humans [Doeller et al, 2010, Nature]. During hippocampal remapping, grid pattern orientations in rats also change. The purpose of the present study is to examine whether orientation changes (i.e. remapping) can also be found in humans. Participants learned object locations within a virtual room and retrieved locations from different start locations during two scanning sessions. They then navigated into an adjacent room and repeated the procedure. We extracted grid orientations from odd trials, and predicted the BOLD response in even trials as a function of the deviation between running direction and the estimated grid orientation for each session. This prediction was significant for the right entorhinal cortex, replicating earlier findings. In 80% of the cases, grid cell orientations significantly differed between sessions both within a room and between rooms. Switching off the virtual environment between sessions for about one minute was seemingly sufficient to trigger this change. For male, but not for female participants, grid cell orientation was clustered around the random view of the room experienced at session start. The data suggest that human grid cell orientations can be rather flexible, which might be due to the virtuality of the experience.
Grid cell orientation might, at least for male participants, be related to the initial view of an environment.}, event_name = {12th Conference of Junior Neuroscientists of Tübingen (NeNA 2011)}, event_place = {Heiligkreuztal, Germany}, state = {published}, author = {Pape A-A{antopia}{Department Human Perception, Cognition and Action}; Wolbers T; Schultz J{johannes}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Meilinger T{meilinger}{Department Human Perception, Cognition and Action}} } @Conference{ Chuang2011_3, title = {Moving objects: From object speed estimation to object exploration}, year = {2011}, month = {10}, web_url = {http://www.liv.ac.uk/psychology/}, event_name = {Department of Psychology, University of Liverpool}, event_place = {Liverpool, UK}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ Chuang2011_2, title = {The active observer: Implications for science and engineering}, year = {2011}, month = {10}, web_url = {http://www.tno.nl/content.cfm?context=thema&content=markt_product&laag1=892&laag2=184&laag3=401&item_id=1581&Taal=1}, event_name = {TNO Human Factors}, event_place = {Soesterberg, Netherlands}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Article{ BulthoffC2011, title = {Seeing: The Computational Approach to Biological Vision. Second Edition. By John P. Frisby and James V. Stone. Cambridge (Massachusetts): MIT Press}, journal = {Quarterly Review of Biology}, year = {2011}, month = {9}, volume = {86}, number = {3}, pages = {227}, web_url = {http://www.journals.uchicago.edu/doi/10.1086/661174}, state = {published}, DOI = {10.1086/661174}, author = {B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Article{ GaissertBW2011, title = {Similarity and categorization: From vision to touch}, journal = {Acta Psychologica}, year = {2011}, month = {9}, volume = {138}, number = {1}, pages = {219-230}, abstract = {Even though human perceptual development relies on combining multiple modalities, most categorization studies so far have focused on the visual modality. To better understand the mechanisms underlying multisensory categorization, we analyzed visual and haptic perceptual spaces and compared them with human categorization behavior. As stimuli we used a three-dimensional object space of complex, parametrically-defined objects. First, we gathered similarity ratings for all objects and analyzed the perceptual spaces of both modalities using multidimensional scaling analysis. Next, we performed three different categorization tasks, which are representative of everyday learning scenarios: in a fully unconstrained task, objects were freely categorized; in a semi-constrained task, exactly three groups had to be created; and in a constrained task, participants received three prototype objects and had to assign all other objects accordingly. We found that the haptic modality was on par with the visual modality both in recovering the topology of the physical space and in solving the categorization tasks. We also found that within-category similarity was consistently higher than across-category similarity for all categorization tasks and thus show how perceptual spaces based on similarity can explain visual and haptic object categorization.
Our results suggest that both modalities employ similar processes in forming categories of complex objects.}, web_url = {http://www.sciencedirect.com/science?_ob=MiamiImageURL&_cid=272045&_user=29041&_pii=S0001691811001302&_check=y&_origin=&_coverDate=30-Sep-2011&view=c&wchp=dGLbVBA-zSkzk&md5=8320348e946ed057a766d4d041954797/1-s2.0-S0001691811001302-main.pdf}, state = {published}, DOI = {10.1016/j.actpsy.2011.06.007}, author = {Gaissert N{ninagaissert}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}} } @Inproceedings{ SonCFKLLBR2011, title = {Measuring an Operator's Maneuverability Performance in the Haptic Teleoperation of Multiple Robots}, year = {2011}, month = {9}, pages = {3039-3046}, abstract = {In this paper, we investigate the maneuverability performance of human teleoperators of multiple robots. First, we propose that maneuverability performance can be assessed by a frequency response function that jointly considers the input force of the operator and the position errors of the multi-robot system that is being maneuvered. Doing so allows us to evaluate maneuverability performance in terms of the human teleoperator's interaction with the controlled system. This allows us to effectively determine the suitability of different haptic cue algorithms in improving teleoperation maneuverability. Performance metrics based on the human teleoperator's frequency response function indicate that maneuverability performance is best supported by a haptic feedback algorithm which is based on an obstacle avoidance force.}, file_url = {fileadmin/user_upload/files/publications/2011/IROS-2011-Son.pdf}, web_url = {http://www.iros2011.org/}, editor = {Amato, N.M.}, publisher = {IEEE}, address = {Piscataway, NJ, USA}, event_name = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS 2011)}, event_place = {San Francisco, CA, USA}, state = {published}, ISBN = {978-1-61284-454-1}, DOI = {10.1109/IROS.2011.6048185}, author = {Son HI{chakurt}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Franchi A{antonio}{Department Human Perception, Cognition and Action}; Kim J{junsukkim}{Department Human Perception, Cognition and Action}; Lee D; Lee S-W; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Robuffo Giordano P{robu_pa}{Department Human Perception, Cognition and Action}} } @Poster{ GaissertWvB2011, title = {Cross-modal transfer in visual and haptic object categorization}, journal = {Perception}, year = {2011}, month = {9}, volume = {40}, number = {ECVP Abstract Supplement}, pages = {134}, abstract = {When humans have to categorize objects, they often rely on shape as a deterministic feature. However, shape is not exclusive to the visual modality: the haptic system is also an expert in identifying shapes. This raises the question of whether humans store separate modality-dependent shape representations or whether one multimodal representation is formed. To better understand how humans categorize objects based on shape, we created a set of computer-generated amoeba-like objects varying in defined shape steps. These objects were then printed using a 3D printer to generate tangible stimuli. In a discrimination task and a categorization task, participants either visually or haptically explored the objects.
We found that both modalities lead to highly similar categorization behavior, indicating that the underlying processes are much alike in the two modalities. Next, participants were trained on specific shape categories by using the visual modality alone or by using the haptic modality alone. As expected, visual training increased visual performance and haptic training increased haptic performance. Moreover, we found that visual training on shape categories greatly improved haptic performance and vice versa. Our results point to a shared representation underlying both modalities, which accounts for the surprisingly strong transfer of training across the senses.}, web_url = {http://pec.sagepub.com/content/40/1_suppl.toc}, event_name = {34th European Conference on Visual Perception}, event_place = {Toulouse, France}, state = {published}, DOI = {10.1177/03010066110400S102}, author = {Gaissert N{ninagaissert}{Department Human Perception, Cognition and Action}; Waterkamp S{swaterka}{Department Human Perception, Cognition and Action}; van Dam L{vandam}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ SchultzB2011, title = {How does the brain identify living things based on their motion?}, journal = {Journal of Vision}, year = {2011}, month = {9}, volume = {11}, number = {11}, pages = {682}, abstract = {Identifying living moving things in the environment is a priority for animals, as these could be prey, enemies or mates. When the shape of the moving object is hard to see (fog, twilight, great distance, small animal), motion becomes an important cue to detect it. The neural correlates of the detection of an isolated living entity on the basis of its motion are largely unknown. To study this phenomenon, we developed a single-dot stimulus, thus eliminating all possible sources of information about form, spatial arrangement, shape or structure of the object. The dot moved such that it appeared self-propelled, or moved by an external force, or something intermediary according to a small set of parameters. Self-propelled stimuli were perceived as more animate (= more likely to be alive) than the externally-moved stimuli, with a gradual transition occurring in the intermediary morphs following a classic psychometric function (cumulative Gaussian). In an fMRI experiment, 20 subjects had to categorize these stimuli into alive and non-alive. A region of the left medial posterior parietal cortex (mPPC) showed BOLD signal correlating with the probability of animacy judgments about the moving dot. While activation in parts of the early visual cortex showed the same response, the mPPC was the only region in which changes in percept had a stronger effect on activation than physical changes in the stimuli. In addition, only the mPPC showed BOLD signal increases when a stimulus was judged to be animate, irrespective of its physical characteristics.
This study shows that parts of the early visual cortex, and particularly the medial posterior parietal cortex (mPPC), are involved in judging the animacy of an isolated translating visual stimulus, without information about its form.}, web_url = {http://www.journalofvision.org/content/11/11/682.abstract}, event_name = {11th Annual Meeting of the Vision Sciences Society (VSS 2011)}, event_place = {Naples, FL, USA}, state = {published}, DOI = {10.1167/11.11.682}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ DobsKBSC2011, title = {Investigating idiosyncratic facial dynamics with motion retargeting}, journal = {Perception}, year = {2011}, month = {9}, volume = {40}, number = {ECVP Abstract Supplement}, pages = {115}, abstract = {3D facial animation systems allow the creation of well-controlled stimuli to study face processing. Despite this high level of control, such stimuli often lack naturalness due to artificial facial dynamics (eg linear morphing). The present study investigates the extent to which human visual perception can be fooled by artificial facial motion. We used a system that decomposes facial motion capture data into time courses of basic action shapes (Curio et al, 2006 APGV 1 77–84). Motion capture data from four short facial expressions were input to the system. The resulting time courses and five approximations were retargeted onto a 3D avatar head using basic action shapes created manually in Poser. Sensitivity to the subtle modifications was measured in a matching task using video sequences of the actor performing the corresponding expressions as target. Participants were able to identify the unmodified retargeted facial motion above chance level under all conditions. Furthermore, matching performance for the different approximations varied with expression. Our findings highlight the sensitivity of human perception to subtle facial dynamics.
Moreover, the action shape-based system will allow us to further investigate the perception of idiosyncratic facial motion using well-controlled facial animation stimuli.}, web_url = {http://pec.sagepub.com/content/40/1_suppl.toc}, event_name = {34th European Conference on Visual Perception}, event_place = {Toulouse, France}, state = {published}, DOI = {10.1177/03010066110400S102}, author = {Dobs K{kdobs}{Department Human Perception, Cognition and Action}; Kleiner M{kleinerm}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}; Curio C{curio}{Department Human Perception, Cognition and Action}} } @Poster{ ChuangS2011, title = {Object speed estimation during walking does not add up}, year = {2011}, month = {9}, web_url = {http://www.bccn-tuebingen.de/news/article/symposium-imultisensory-perception-and-actioni-96.html}, event_name = {Bernstein Cluster D Symposium: Multisensory Perception and Action}, event_place = {Tübingen, Germany}, state = {published}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Souman JL{souman}{Department Human Perception, Cognition and Action}} } @Poster{ ChuangBS2011, title = {The center-surround effect in visual speed estimation during walking}, journal = {Perception}, year = {2011}, month = {9}, volume = {40}, number = {ECVP Abstract Supplement}, pages = {129}, abstract = {Walking reduces visual speed estimates of optic flow (Souman et al, 2010 Journal of Vision 10(11):14). Simultaneously, visual background motion can influence the perceived speed of moving objects (Tynan and Sekuler, 1975 Vision Research 15 1231–1238; Baker and Graf, 2010 Vision Research 50 193–201). These two effects have been attributed to different subtractive processes, which may help in segregating object motion from self-motion induced optic flow. Here, we investigate how both factors jointly contribute to the perceived visual speed of objects. Participants compared the speed of two central Gabor patches on a ground plane, presented in consecutive intervals, either while standing still or while walking on a treadmill. In half the trials, one of the Gabors was surrounded by a moving random dot pattern, the speed of which matched walking speed. Our results replicated previous findings. A moving surround as well as walking can independently induce a subtractive effect on the perceived speed of the moving center, with the effect size increasing with center speed. However, walking does not affect visual speed estimates of the center when a visual surround is present. These results suggest that the visual input dominates the segregation of object motion from background optic flow.}, web_url = {http://pec.sagepub.com/content/40/1_suppl.toc}, event_name = {34th European Conference on Visual Perception}, event_place = {Toulouse, France}, state = {published}, DOI = {10.1177/03010066110400S102}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Souman J{souman}{Department Human Perception, Cognition and Action}} } @Poster{ LeeBAWB2011, title = {The other-race effect is not ubiquitous}, journal = {Journal of Vision}, year = {2011}, month = {9}, volume = {11}, number = {11}, pages = {626}, abstract = {The recognition advantage for faces of one's own race over faces of another race (the other-race effect or ORE) has been widely cited.
Nevertheless, recognizing the identity of a face is a complex task among many others; hence it might be premature to conclude that own-race faces are always easier to process. We investigated whether same-race faces still have a processing advantage over other-race faces when only ethnicity-related information is available to differentiate between faces. We morphed the ethnicity of 20 Caucasian and 20 Asian faces toward their other-race counterpart while keeping their idiosyncratic, identity-related features. Morphing was done at three levels (20%, 50%, and 80% toward the other race). The task for two groups of participants (25 Tübingen and 26 Seoul participants) was to report which face looked more Caucasian (or Asian) after looking at the original face and a morphed face sharing the same idiosyncratic features. Both faces were presented side by side on a computer monitor in one task and sequentially in another task. Importantly, we found no evidence for an ORE in participants’ performance and no performance difference between Tübingen and Seoul participants. Both groups were equally good and equally fast at comparing the ethnicity of two faces regardless of the task, the ethnicity of the faces and the question asked. However, we did find evidence that Seoul and Tübingen participants used different viewing strategies. By investigating their eye movements in the sequential task, we found that the ethnicity of participants affected fixation durations on specific areas of the face, especially the nose. Also, the type of questions asked and stimulus race altered the pattern of eye movements. These results suggest that although Caucasians and Asians were equally good at dealing with ethnicity information of both races, they might employ different viewing strategies.}, web_url = {http://www.journalofvision.org/content/11/11/626.abstract}, event_name = {11th Annual Meeting of the Vision Sciences Society (VSS 2011)}, event_place = {Naples, FL, USA}, state = {published}, DOI = {10.1167/11.11.626}, author = {Lee RK{ryokyung}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Armann R{armann}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ EsinsBS2011, title = {The role of featural and configural information for perceived similarity between faces}, journal = {Journal of Vision}, year = {2011}, month = {9}, volume = {11}, number = {11}, pages = {673}, abstract = {An important aspect of face recognition involves the role of featural and configural information for face perception (e.g. Tanaka and Farah, 1993; Yovel and Duchaine, 2006; Rotshtein et al, 2007). In our study, we investigated the influence of featural and configural information on perceived similarity between faces. Eight pairs of male faces were chosen from our digital face database (http://faces.kyb.tuebingen.mpg.de). The texture and the face shape for both faces in a pair were equalized to create two basis faces that differed only in their inner facial features and their configuration, but not in face shape or texture. A computer algorithm allowed us to parametrically morph the features, the configuration, or both between the two basis faces of a pair. In our case, the morphing was done in 25% steps. Twenty-four participants rated the similarity between pairs of the created faces using a 7-point Likert scale.
The faces to compare came from the same basis face pair and could differ either in features or in configuration by 0%, 25%, 50%, 75% or 100%. The results revealed that for the same amount of morphing, faces differing by their features are perceived as less similar than faces differing by their configurations. These findings replicate previous results obtained under less natural or less controlled conditions. Furthermore, we found that linear increases of the difference between both faces in configural or featural information resulted in a nonlinear increase of perceived dissimilarity. An important aspect for the relevance of our results is how natural the face stimuli look. We asked 24 participants to rate the naturalness of all stimuli including the original faces and the created faces. Despite numerous manipulations, the vast majority of our created face stimuli were rated as natural as the original faces.}, file_url = {fileadmin/user_upload/files/publications/2011/VSS-2011-Esins.pdf}, web_url = {http://www.journalofvision.org/content/11/11/673.abstract}, event_name = {11th Annual Meeting of the Vision Sciences Society (VSS 2011)}, event_place = {Naples, FL, USA}, state = {published}, DOI = {10.1167/11.11.673}, author = {Esins J{esins}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}} } @Poster{ KaulardFBS2011, title = {Uncovering the principles that allow a distinction of conversational facial expressions}, journal = {Journal of Vision}, year = {2011}, month = {9}, volume = {11}, number = {11}, pages = {605}, abstract = {Facial expressions convey both emotional and conversational signals. Research focuses mostly on EMOTIONAL expressions and consistently finds that these can be reliably distinguished along at least two dimensions: valence and arousal. CONVERSATIONAL expressions, i.e. those conveying mainly communicative meaning, are thought to be less emotionally laden. Interestingly, we found evidence pointing towards the same first two underlying dimensions for CONVERSATIONAL expressions when presented dynamically. The question now arises: "Is the emergence of the valence and arousal dimensions for conversational facial expressions based on the emotional content of these expressions?" To answer this, we used questions addressing the emotional (Fontaine et al, 2007) and the conversational content separately. If the distinction of conversational expressions is based on the small amount of emotional information they might contain, we expect emotional content questions to allow a separation of those expressions. Ten native German participants answered a set of 27 questions for 6 emotional and 6 conversational expressions, both presented statically and dynamically, using a rating scale. A dissimilarity matrix was computed for the expressions. To uncover the meaning of the first two underlying dimensions allowing expression differentiation, multidimensional scaling (MDS) was used. Our results show that static and dynamic emotional expressions can only be distinguished by means of emotional content questions. For these emotional expressions, the valence and arousal dimensions emerged in the MDS. In contrast, conversational expressions can be distinguished using conversational content questions but not using emotional content questions. Unlike for emotional expressions, dynamic information substantially improved the distinction of conversational expressions.
We found evidence for valence and arousal to be the underlying dimensions for conversational expressions. Our results suggest that the distinction of conversational expressions along the first two dimensions is based on conversational rather than emotional content. Moreover, different types of facial expressions benefit to different degrees from dynamic information.}, web_url = {http://www.journalofvision.org/content/11/11/605.abstract}, event_name = {11th Annual Meeting of the Vision Sciences Society (VSS 2011)}, event_place = {Naples, FL, USA}, state = {published}, DOI = {10.1167/11.11.605}, author = {Kaulard K{kascot}{Department Human Perception, Cognition and Action}; Fernandez Cruz AL{anafer}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}} } @Poster{ BulthoffSMT2011, title = {Using avatars to explore height/pitch effects when learning new faces}, journal = {Journal of Vision}, year = {2011}, month = {9}, volume = {11}, number = {11}, pages = {596}, abstract = {In a previous series of desktop experiments, we found no evidence that individuals' height influenced their representation of others' faces or their ability to process faces viewed from above or below (VSS 2009). However, in those experiments face orientation and body height were ambiguous, as isolated faces were shown on a computer screen to an observer sitting on a chair. To address those concerns and to specifically examine the influence of learned viewpoint, we created a virtual museum containing 20 full-bodied avatars (statues) that were either sitting or standing. Using a head-mounted display, observers walked through this virtual space three times, approached each statue and viewed it from any horizontal (yaw) angle without time restrictions. We equated eye level, and thus simulated height, for all participants and restricted their vertical movement to ensure that the faces of sitting avatars were always viewed from above and standing avatars from below. After familiarization, recognition was tested using a standard old-new paradigm in which 2D images of the learned faces were shown from various viewpoints. Results showed a clear influence of learned viewpoint. Faces that had been learned from above (below) were recognized more quickly and accurately in that orientation than from the opposite orientation. Thus, recognition of specific, newly learned faces appears to be view-dependent in terms of pitch angle.
Our failure to find a height effect in our previous study suggests that the variety of views of human faces experienced during a lifetime, and possibly the preponderance of conversational situations between humans at close range, typically counteract any influence that body size might have on a person's viewing experience of others' faces.}, web_url = {http://www.journalofvision.org/content/11/11/596.abstract}, event_name = {11th Annual Meeting of the Vision Sciences Society (VSS 2011)}, event_place = {Naples, FL, USA}, state = {published}, DOI = {10.1167/11.11.596}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Shrimpton S{sezys}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}} } @Poster{ KaularddSFBW2011, title = {What are the properties underlying similarity judgments of facial expressions?}, journal = {Perception}, year = {2011}, month = {9}, volume = {40}, number = {ECVP Abstract Supplement}, pages = {115}, abstract = {Similarity ratings are used to investigate the cognitive representation of facial expressions. The perceptual and cognitive properties (eg physical aspects, motor expressions, action tendencies) driving the similarity judgments of facial expressions are largely unknown. We examined potentially important properties with 27 questions addressing the emotional and conversational content of expressions (semantic differential). The ratings of these semantic differentials were used as predictors for facial expression similarity ratings. The semantic differential and similarity-rating task were performed on the same set of facial expression videos: 6 types of emotional (eg happy) and 6 types of conversational (eg don’t understand) expressions. Different sets of participants performed the two tasks. Multiple regression was used to predict the similarity data from the semantic differential questions. The best model for emotional expressions consisted of two emotional questions explaining 75% of the variation in similarity ratings. The same model explained significantly less variation for conversational expressions (38%). The best model for those expressions consisted of a single conversational question explaining 44% of the variation. This study shows which properties of facial expressions might affect their perceived similarity.
Moreover, our results suggest that different perceptual and cognitive properties might underlie similarity judgments about emotional and conversational expressions.}, web_url = {http://pec.sagepub.com/content/40/1_suppl.toc}, event_name = {34th European Conference on Visual Perception}, event_place = {Toulouse, France}, state = {published}, DOI = {10.1177/03010066110400S102}, author = {Kaulard K{kascot}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}; Fernandez Cruz AL{anafer}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}} } @Poster{ SchultzBP2011, title = {What human brain regions like about moving faces?}, journal = {Perception}, year = {2011}, month = {9}, volume = {40}, number = {ECVP Abstract Supplement}, pages = {116}, abstract = {Visual perception of moving faces activates parts of the human superior temporal sulcus (STS) whereas static facial information is mainly processed in areas of ventral temporal and lateral occipital cortex. However, recent findings show that the latter regions also respond more to moving faces than to static faces. Here, we investigated the origin of this activation increase, considering the following causes: (i) facial motion per se, (ii) increased static information due to the higher number of frames constituting the movie stimuli, and/or (iii) increased attention towards moving faces. We presented non-rigidly moving faces to subjects in an fMRI scanner. We manipulated static face information and motion fluidity by presenting ordered and scrambled sequences of frames at the original or reduced temporal resolutions. Subjects performed a detection task unrelated to the face stimuli in order to equate attentional influences. Results confirm the increased response due to facial motion in the face-sensitive temporal regions. Activation generally increased with the number of frames but decreased when frames were scrambled. These results indicate that the activation increase induced by moving faces is due to smooth, natural motion and not only to increased static information or attentional modulation.}, web_url = {http://pec.sagepub.com/content/40/1_suppl.toc}, event_name = {34th European Conference on Visual Perception}, event_place = {Toulouse, France}, state = {published}, DOI = {10.1177/03010066110400S102}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; Brockhaus M{mabrockhaus}{Department Human Perception, Cognition and Action}; Pilz K{kpilz}{Department Human Perception, Cognition and Action}} } @Conference{ Chuang2011, title = {How do we seek out information?}, year = {2011}, month = {8}, day = {29}, abstract = {Many tasks require us to access relevant information from a dynamic visual input. To do so, we move our eyes and bodies as well as manipulate our environments. Unfortunately, experiments on human behavior tend to ignore this fact, often to the detriment of their ecological validity. Our understanding can be better informed by studying how humans actively seek out relevant information in their unrestrained and task-relevant workspaces. I will present several research studies from our lab to demonstrate this point. 
These studies concern how humans explore novel objects, how unrestrained gaze can be measured on wall-sized displays, and how haptic force feedback influences the teleoperation of micro unmanned aerial vehicles. Finally, I will introduce our latest research project that targets the implications of a personal air transport system (www.mycopter.eu).}, file_url = {fileadmin/user_upload/files/publications/2011/D-CIS-Lab-2011-Chuang.pdf}, web_url = {http://www.d-cis.nl/news/210-colloquium-how-do-we-seek-out-information-}, event_name = {D-CIS Lab Colloquium}, event_place = {Delft, Netherlands}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ BulthoffAWB2011, title = {Investigating the other-race effect in different face recognition tasks}, journal = {i-Perception}, year = {2011}, month = {7}, volume = {2}, number = {4}, pages = {355}, abstract = {Faces convey various types of information like identity, ethnicity, sex or emotion. We investigated whether the well-known other-race effect (ORE) is observable when facial information other than identity varies between test faces. First, in a race comparison task, German and Korean participants compared the ethnicity of two faces sharing similar identity information but differing in ethnicity. Participants reported which face looked more Asian or Caucasian. Their behavioral results showed that Koreans and Germans were equally good at discriminating ethnicity information in Asian and Caucasian faces. The nationality of participants, however, affected their eye-movement strategy when the test faces were shown sequentially, that is, when memory was involved. In the second study, we focused on the ORE in the recognition of facial expressions. Korean participants viewed Asian and Caucasian faces showing different facial expressions for 100 ms to 800 ms and reported the emotion of the faces. Surprisingly, at all three presentation times, Koreans were significantly better with Caucasian faces. These two studies suggest that the ORE does not appear in all recognition tasks involving other-race faces.
Here, when identity information is not involved in the task, we are not better at discriminating ethnicity and facial expressions in same-race compared to other-race faces.}, web_url = {http://i-perception.perceptionweb.com/journal/I/volume/2/article/ic355}, event_name = {7th Asia-Pacific Conference on Vision (APCV 2011)}, event_place = {Hong Kong}, state = {published}, DOI = {10.1068/ic355}, author = {Lee RK{ryokyung}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Armann R{armann}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ Bulthoff2011_10, title = {Interplay between identity and sex recognition in familiar faces}, year = {2011}, month = {6}, day = {20}, event_name = {Workshop "Yet another journey through computation"}, event_place = {Genova, Italy}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Inproceedings{ SonKCFRLB2011, title = {An Evaluation of Haptic Cues on the Tele-Operator's Perceptual Awareness of Multiple UAVs' Environments}, year = {2011}, month = {6}, pages = {149-154}, abstract = {The use of multiple unmanned aerial vehicles (UAVs) is increasingly being incorporated into a wide range of teleoperation applications. To date, relevant research has largely been focused on the development of appropriate control schemes. In this paper, we extend previous research by investigating how control performance could be improved by providing the teleoperator with haptic feedback cues. First, we describe a control scheme that allows a teleoperator to manipulate the flight of multiple UAVs in a remote environment. Next, we present three designs of haptic cue feedback that could increase the teleoperator's environmental awareness of such a remote environment. These cues are based on the UAVs' i) velocity information, ii) proximity to obstacles, and iii) a combination of these two sources of information. Finally, we present an experimental evaluation of these haptic cue designs. Our evaluation is based on the teleoperator's perceptual sensitivity to the physical environment inhabited by the multiple UAVs. We conclude that a teleoperator's perceptual sensitivity is best served by haptic feedback cues that are based on the velocity information of multiple UAVs.}, file_url = {fileadmin/user_upload/files/publications/2011/WHC-2011-Son.pdf}, web_url = {http://www.haptics2011.org/en/}, editor = {Jones, L., M. Harders, Y.
Yokokohji}, publisher = {IEEE}, address = {Piscataway, NJ, USA}, event_name = {IEEE 2011 World Haptics Conference (WHC 2011)}, event_place = {Istanbul, Turkey}, state = {published}, ISBN = {978-1-4577-0299-0}, DOI = {10.1109/WHC.2011.5945477}, author = {Son HI{chakurt}{Department Human Perception, Cognition and Action}; Kim J{junsukkim}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Franchi A{antonio}{Department Human Perception, Cognition and Action}; Robuffo Giordano P{robu_pa}{Department Human Perception, Cognition and Action}; Lee D; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ GaissertW2011, title = {Integrating Visual and Haptic Shape Information to Form a Multimodal Perceptual Space}, year = {2011}, month = {6}, pages = {451-456}, abstract = {In this study, we address the question to what extent the visual and the haptic modalities contribute to the final formation of a complex multisensory perceptual space. By varying three shape parameters, a physical shape space of shell-like objects was generated. Participants were allowed to either see or touch the objects or use both senses to explore the objects. Similarity ratings were performed and analyzed using multidimensional scaling (MDS) techniques. By comparing the unimodal perceptual spaces to the multimodal perceptual space, we tried to resolve the impact of the visual and the haptic modalities on the combined percept. We found that neither the visual nor the haptic modality dominated the final percept, but rather that the two modalities contributed to the combined percept almost equally. To investigate to what degree these results are transferable to natural objects, we performed the same visual, haptic, and visuo-haptic similarity ratings and multidimensional scaling analyses using a set of natural sea shells. Again, we found almost equal contributions of the visual and the haptic modalities to the combined percept. Our results suggest that multisensory perceptual spaces are based on a complex combination of object information gathered by different senses.}, web_url = {http://www.haptics2011.org/en/}, editor = {Jones, L.A., M. Harders, Y.
Yokokohji}, publisher = {IEEE}, address = {Piscataway, NJ, USA}, event_name = {IEEE World Haptics Conference (WHC 2011)}, event_place = {Istanbul, Turkey}, state = {published}, ISBN = {978-1-4577-0299-0}, DOI = {10.1109/WHC.2011.5945528}, author = {Gaissert N{ninagaissert}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}} } @Conference{ WallravenC2011, title = {Non-accidental properties determine object exploration patterns}, year = {2011}, month = {5}, pages = {1-2}, file_url = {fileadmin/user_upload/files/publications/2011/ICCNS-2011-Wallraven.pdf}, web_url = {http://cns.bu.edu/cns-meeting/2011conference.html}, event_name = {15th International Conference on Cognitive and Neural Systems (ICCNS 2011)}, event_place = {Boston, MA, USA}, state = {published}, author = {Wallraven C{walli}{Department Human Perception, Cognition and Action}; Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ DavidSVE2011, title = {Individuals with Autism Show a Selective Deficit for the Understanding of Interacting Animated Objects}, journal = {Journal of Cognitive Neuroscience}, year = {2011}, month = {4}, day = {3}, volume = {23}, number = {Supplement}, pages = {64}, abstract = {A focus on social deficits in autism spectrum disorders (ASD) has, for a long time, obscured the existence of lower-level perceptual abnormalities, although the earliest descriptions of autism included abnormalities in oculomotor behavior and visual attention. More recently, however, abnormalities in perception and attention have increasingly been discussed as influential factors in ASD-specific psychopathology. Accordingly, the perception of coherent motion in random-dot kinematograms, biological motion in point-light walkers and agency in animated shapes have been investigated in ASD, but their relationship remains a matter of debate. It is also unclear whether ASD-related deficits result from difficulties in global motion perception or in processing motion that contains socially relevant signals (e.g. a body and actions). We tested 18 individuals with high-functioning autism and 16 age-, gender- and IQ-matched control participants, who performed three tasks on a continuum of motion cues and social complexity: (1) low-level translational motion that moved up or down, (2) complex motion of a single dot that moved in an animate or inanimate way, (3) complex motion of two dots that interacted or not. None of these tasks contained objects with human shape and only the first task contained global motion. Participants with autism were selectively impaired in detecting social interaction between two animated shapes (task 3), while low-level motion processing (task 1) and the detection of isolated agents (task 2) were preserved.
These findings suggest a distinct social impairment in ASD in understanding interacting agents.}, web_url = {http://cogneurosociety.org/annual-meeting/previous-meetings/CNS2011_Program.pdf/view}, event_name = {18th Annual Meeting of the Cognitive Neuroscience Society (CNS 2011)}, event_place = {San Francisco, CA, USA}, state = {published}, author = {David N; Schultz J{johannes}{Department Human Perception, Cognition and Action}; Vogeley K; Engel A} } @Poster{ DavidSVE2011_2, title = {Individuals with autism are impaired in social animacy perception but not in lower-level animacy or coherent motion perception}, year = {2011}, month = {4}, web_url = {http://www.mrc-cbu.cam.ac.uk/socialbrain2011/}, event_name = {The Social Brain Workshop 2011}, event_place = {Cambridge, UK}, state = {published}, author = {David N; Schultz J{johannes}{Department Human Perception, Cognition and Action}; Vogeley K; Engel A} } @Book{ Armann2011, title = {Faces in the Brain: a Behavioral, Eye-tracking and High-level Adaptation Approach to Human Face Perception}, year = {2011}, pages = {167}, note = {Tübingen, Univ., Diss., 2011}, web_url = {http://www.logos-verlag.de/cgi-bin/engbuchmid?isbn=2900&lng=deu&id=}, publisher = {Logos-Verlag}, address = {Berlin, Germany}, series = {MPI Series in Biological Cybernetics ; 29}, state = {published}, ISBN = {978-3-8325-2900-0}, author = {Armann R{armann}{Department Human Perception, Cognition and Action}} } @Book{ Gaissert2011, title = {Perceiving Complex Objects: A Comparison of the Visual and the Haptic Modalities}, year = {2011}, pages = {204}, abstract = {The hands are the most important tool of humans. Although this saying is very old, little is known about how and what humans perceive when they pick up and haptically explore objects. How is the touched shape of an object stored in the brain? Is the haptic representation similar to that of visual perception? Does a multisensory, and thus shared, representation even emerge? These fundamental questions form the background of this dissertation. The experiments presented here show that humans generate very similar perceptual spaces when complex shapes from a parametrically defined object space are explored visually or haptically. To show this, a three-dimensional object space of shell-like objects varying in three shape parameters was first generated. In the visual experiments, participants were shown photographs or virtual reconstructions of the objects, whereas in the haptic experiments blindfolded participants explored 3D plastic models of the objects produced with a 3D printer. In a first series of experiments, participants rated the similarity of two sequentially presented objects. From these similarity ratings, the perceptual spaces of both modalities were visualized by means of multidimensional scaling. Surprisingly, participants reproduced the topology of the object space correctly, regardless of whether they had seen or touched the objects. Furthermore, the results showed that the visual and the haptic perceptual spaces were almost identical. Next, three categorization experiments were carried out. Although categorization by touch alone is a rather unusual task, it was solved just as well as when participants could see the objects. Subsequently, the perceptual spaces of both modalities were compared with the results of the categorization experiments. For all categorization experiments and for both modalities, the perceived similarity between objects within a category was higher than the similarity between two objects from different categories. That is, in vision as well as in touch, objects that were perceived as very similar were grouped into the same category. To investigate to what extent the results obtained with computer-generated objects transfer to natural objects, a collection of seashells and saltwater snails was assembled. With these, similarity ratings were performed as described above and the perceptual spaces were visualized by means of multidimensional scaling. Again, the visual and the haptic perceptual spaces were almost identical. Interestingly, clusters could be discerned in both spaces, which is why three categorization experiments were carried out here as well. Although the shells varied in a multitude of object features, e.g. shape, color, pattern, etc., participants solved this task effortlessly, even when they were only allowed to touch the objects. In addition, the clusters that were already apparent in the perceptual spaces correctly predicted the categorization results. Taken together, these results indicate that the visual and the haptic representations of objects must be very closely linked. Furthermore, the experiments provide evidence that the same processes are used when perceiving similarities between objects or when categorizing objects, regardless of whether the objects are explored visually or haptically.}, note = {Tübingen, Univ., Diss., 2011}, web_url = {http://www.logos-verlag.de/cgi-bin/engbuchmid?isbn=2794&lng=eng&id=}, publisher = {Logos-Verlag}, address = {Berlin, Germany}, series = {MPI Series in Biological Cybernetics ; 26}, state = {published}, ISBN = {978-3-8325-2794-5}, author = {Gaissert N{ninagaissert}{Department Human Perception, Cognition and Action}} } @Thesis{ Armann2011_2, title = {Faces in the Brain: a Behavioral, Eye-tracking and High-level Adaptation Approach to Human Face Perception}, year = {2011}, state = {published}, type = {PhD}, author = {Armann R{armann}{Department Human Perception, Cognition and Action}} } @Thesis{ Gaissert2011_2, title = {Perceiving Complex Objects: A Comparison of the Visual and the Haptic Modalities}, year = {2011}, state = {published}, type = {PhD}, author = {Gaissert N{ninagaissert}{Department Human Perception, Cognition and Action}} } @Thesis{ Chuang2011_5, title = {Recognizing Objects From Dynamic Visual Experiences}, year = {2011}, state = {published}, type = {PhD}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ 7068, title = {"Own-species" bias in the categorical representation of a human/monkey continuum in the human and non-human primate temporal lobe}, year = {2010}, month = {11}, volume = {40}, number = {581.20}, abstract = {While face categorization is a fundamental cognitive ability of human and non-human primates, its neural basis remains poorly understood. Using a new morphing technique, we created realistic three-dimensional morphed faces that linearly span the continuum between humans and monkeys (“species” continuum).
Extensive categorization and discrimination experiments in human observers show that humans perceive the “species” continuum categorically. Moreover, the position of the categorical boundary is shifted from the center towards the human end of the continuum, suggesting a higher sensitivity to changes near the own-species prototype. We presented a subset of these faces to human subjects in a block-design fMRI experiment to record BOLD signals from the temporal lobe while participants performed an unrelated task at fixation. We applied a multivariate approach based on (Pearson) correlations to compute the difference between activity patterns elicited by faces along the continuum. Using this method, we looked for a categorical representation in face-selective areas previously defined using an independent, standard "Face-localizer" experiment. Consistent with the psychophysical results, we found a categorical response with a bias towards the human end of the stimulus continuum in the activation patterns of the left human STS. In addition, activation in human ventral temporal cortex was most sensitive to deviations from the human prototype. To look for similar effects in monkeys, we applied an equivalent multivariate approach to analyze extracellular signals from a population of neurons recorded from the STS of two macaque monkeys while they fixated on the same types of faces. Additionally, the position of the perceptual category boundary was determined with a preferential-looking-time experiment. In both behavioral and neuronal monkey data, we found a categorical representation of the continuum, but in this case, with a bias towards the monkey end of the continuum. Our results demonstrate the neural basis of categorical representation of a facial attribute in the human and non-human primate brain. Together, our findings suggest that experience can lead to significant shifts in category boundary for face stimuli.}, web_url = {http://www.sfn.org/am2010/index.aspx?pagename=abstracts_main}, event_name = {40th Annual Meeting of the Society for Neuroscience (Neuroscience 2010)}, event_place = {San Diego, CA, USA}, state = {published}, author = {Sigala Alanis GR{sigala}{Department Physiology of Cognitive Processes}; Schultz J{johannes}{Department Human Perception, Cognition and Action}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Rainer G{gregor}{Department Physiology of Cognitive Processes}} } @Poster{ SchultzBP2011_2, title = {What human brain regions like about moving faces}, year = {2010}, month = {11}, volume = {40}, number = {393.10}, abstract = {Visual perception of moving faces activates parts of the human superior temporal sulcus (STS) whereas static facial information is mainly processed in areas of ventral temporal and lateral occipital cortex. However, recent findings show that the latter regions also respond more to moving faces than to static faces (Schultz and Pilz, 2009). This study investigated the origin of this activation increase. We considered the following causes: (1) facial motion per se, (2) increased static information due to the higher number of frames constituting the movie stimuli, and/or (3) increased attention towards moving faces, which would increase the response in face-sensitive areas through top-down modulation. We presented non-rigidly moving faces to participants lying in an fMRI scanner.
We manipulated static face information and the fluidity of motion in the stimuli by presenting ordered and scrambled frame sequences at three temporal resolutions: 25 frames per second (fps), perceived as fluid motion when ordered but as very non-fluid when scrambled; 12.5 fps, still perceived as fluid when ordered; and 5 fps, significantly less fluid when ordered, with the smallest effect of scrambling. To control the influence of attention, subjects were asked to perform a target detection task that was unrelated to the face stimuli (one-back matching task on a stream of letters presented at fixation). Results confirm the increased activation induced by facial motion in the face-sensitive fusiform and superior temporal regions. A purely attention-based effect can be ruled out given that task performance was far from ceiling and equal across conditions. While activation generally increased with the number of frames, a significant reduction of activation was observed due to frame-scrambling the stimuli. These results indicate that the activation increase induced by moving faces is due to the motion of the stimulus (i.e. temporal order of the frames) and not only to increased static information or attentional modulation.}, web_url = {http://www.sfn.org/am2010/index.aspx?pagename=final_program}, event_name = {40th Annual Meeting of the Society for Neuroscience (Neuroscience 2010)}, event_place = {San Diego, CA, USA}, state = {published}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; Brockhaus M{mabrockhaus}{Department Human Perception, Cognition and Action}; Pilz K{kpilz}{Department Human Perception, Cognition and Action}} } @Conference{ KaulardWdB2010, title = {Cognitive categories of emotional and conversational facial expressions are influenced by dynamic information}, year = {2010}, month = {10}, volume = {11}, pages = {16}, abstract = {Most research on facial expressions focuses on static, ’emotional’ expressions. Facial expressions, however, are also important in interpersonal communication (’conversational’ expressions). In addition, communication is a highly dynamic phenomenon and previous evidence suggests that dynamic presentation of stimuli facilitates recognition. Hence, we examined the categorization of emotional and conversational expressions using both static and dynamic stimuli. In a between-subject design, 40 participants were asked to group 55 different facial expressions (either static or dynamic) of ten actors in a free categorization task. Expressions were to be grouped according to their overall similarity. The resulting confusion matrix was used to determine the consistency with which facial expressions were categorized. In the static condition, emotional expressions were grouped as separate categories while participants confused conversational expressions. In the dynamic condition, participants uniquely categorized basic and sub-ordinate emotional, as well as several conversational facial expressions. Furthermore, a multidimensional scaling analysis suggests that the same potency and valence dimensions underlie the categorization of both static and dynamic expressions. Basic emotional expressions represent the most effective categories when only static information is available.
Importantly, however, our results show that dynamic information allows for a much more fine-grained categorization and is essential in disentangling conversational expressions.}, event_name = {11th Conference of Junior Neuroscientists of Tübingen (NeNa 2010)}, event_place = {Heiligkreuztal, Germany}, state = {published}, author = {Kaulard K{kascot}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ 6643, title = {Visual and Haptic Perceptual Spaces Show High Similarity in Humans}, journal = {Journal of Vision}, year = {2010}, month = {9}, volume = {10}, number = {11:2}, pages = {1-20}, abstract = {In this study, we show that humans form highly similar perceptual spaces when they explore complex objects from a parametrically defined object space in the visual and haptic domains. For this, a three-dimensional parameter space of well-defined, shell-like objects was generated. Participants either explored two-dimensional pictures or three-dimensional, interactive virtual models of these objects visually, or they explored three-dimensional plastic models haptically. In all cases, the task was to rate the similarity between two objects. Using these similarity ratings and multidimensional scaling (MDS) analyses, the perceptual spaces of the different modalities were then analyzed. Looking at planar configurations within this three-dimensional object space, we found that active visual exploration led to a highly similar perceptual space compared to passive exploration, showing that participants were able to reconstruct the complex parameter space from two-dimensional pictures alone. Furthermore, we found that the visual and haptic perceptual spaces had a topology virtually identical to that of the physical stimulus space. Surprisingly, the haptic modality even slightly exceeded the visual modality in recovering the topology of the complex object space when the whole three-dimensional space was explored. Our findings point to a close connection between visual and haptic object representations and demonstrate the great degree of fidelity with which haptic shape processing occurs.}, web_url = {http://www.journalofvision.org/content/10/11/2.full.pdf+html}, state = {published}, DOI = {10.1167/10.11.2}, author = {Gaissert N{ninagaissert}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 6656, title = {Towards Artificial Systems: What Can We Learn from Human Perception?}, year = {2010}, month = {9}, pages = {1-3}, abstract = {Research in learning algorithms and sensor hardware has led to rapid advances in artificial systems over the past decade. However, their performance continues to fall short of the efficiency and versatility of human behavior. In many ways, a deeper understanding of how human perceptual systems process and act upon physical sensory information can contribute to the development of better artificial systems.
In the presented research, we highlight how the latest tools in computer vision, computer graphics, and virtual reality technology can be used to systematically understand the factors that determine how humans perform in realistic scenarios of complex task-solving.}, file_url = {/fileadmin/user_upload/files/publications/PRICAI-62300001%20(1)_6656[0].pdf}, web_url = {http://www.pricai2010.org/default.asp}, editor = {Zhang, B.-T. , M. A. Orgun}, publisher = {Springer}, address = {Berlin, Germany}, series = {Lecture Notes in Computer Science ; 6230}, booktitle = {PRICAI 2010: Trends in Artificial Intelligence}, event_name = {11th Pacific Rim International Conference on Artificial Intelligence}, event_place = {Daegu, South Korea}, state = {published}, ISBN = {978-3-642-15246-7}, DOI = {10.1007/978-3-642-15246-7_1}, author = {B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ ChuangBBF2010, title = {Measuring unrestrained gaze on wall-sized displays}, year = {2010}, month = {8}, pages = {347-348}, abstract = {Motivation -- Natural gaze involves the coordinated movements of eye, head and torso. This allows access to a wide field of view, up to a range of 260° (Chen, Solinger, Poncet & Lancet, 1999). The recent increase in large displays places a demand on being able to track a mobile user's gaze over this extensive range. Research approach -- We developed an extensible system for measuring the gaze of users on wall-sized displays. Our solution combines the inputs of a conventional head-mounted eyetracker (Eyelink2©, SR Research) and motion-capture system (Vicon MX©, Vicon), to provide real-time measurements of a mobile user's gaze in 3D space. Findings/Design -- The presented system serves as a single platform for studying user behavior across a wide range of tasks: single-step saccade shifts, free-viewing of natural scenes, visual search and gaze-assisted user interfaces. Importantly, it allows eye- and head-movements to be separately measured without compromising the accuracy of combined gaze measurements. Take away message -- Unrestrained gaze movements on a large display can be accurately measured by suitably combining the inputs of conventional eye- and body-tracking hardware.}, web_url = {http://ecce2010.tudelft.nl/}, editor = {Neerincx, W. , W-P Brinkman}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {28th Annual European Conference on Cognitive Ergonomics (ECCE '10)}, event_place = {Delft, Netherlands}, state = {published}, ISBN = {978-1-60558-946-6}, DOI = {10.1145/1962300.1962379}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Fleming RW{roland}{Department Human Perception, Cognition and Action}} } @Poster{ 6606, title = {Does adding a visual task component affect fixation accuracy?}, journal = {Perception}, year = {2010}, month = {8}, volume = {39}, number = {ECVP Abstract Supplement}, pages = {35}, abstract = {Video-based eye-trackers are typically calibrated by instructing participants to fixate a series of dots, the physical locations of which are known to the system. Unfortunately, this procedure does not verify if fixation has actually occurred at the desired locations. 
This limitation can be remedied by requiring participants to perform a simple visual discrimination task at each location, thus mandating accurate fixation. Still, it remains an open question whether this modification could affect fixation accuracy. In the current study, we compared the accuracy of fixations that were performed with a visual discrimination task and those without such a requirement. Participants either identified the orientation of a small Landolt C (size = 0.1°) or fixated a similar probe without performing the task. Results indicate that participants fixated equally well in both tasks (mean diff. of abs. error = 0.01°, Bayes factor B01 = 4.0 with JZS prior, see [Rouder et al., 2009, Psychonomic Bulletin & Review, 16(2), 225-237]). Given this, we propose adding this visual discrimination task to eye-tracking calibration protocols, as it elicits verifiable fixations without compromising fixation accuracy.}, web_url = {http://pec.sagepub.com/content/39/1_suppl.toc}, event_name = {33rd European Conference on Visual Perception}, event_place = {Lausanne, Switzerland}, state = {published}, DOI = {10.1177/03010066100390S101}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 6787, title = {No other-race effect found in a task using faces differing only in race-specifying information}, journal = {Perception}, year = {2010}, month = {8}, volume = {39}, number = {ECVP Abstract Supplement}, pages = {90}, abstract = {Generally, faces of one’s own ethnicity are better remembered than faces of another race. The mechanisms of this other-race effect (ORE) are still unresolved. The present study investigates whether own-race expertise results in an ORE in a discrimination task when only race-specifying information varies between faces, with no interference of identity change and no memory load. If expertise is an important factor in the ORE, Caucasian participants, for example, should better discriminate between two Caucasian faces presented side by side than between two Asian faces. We tested participants in Seoul and Tübingen with pairs of Asian or Caucasian faces. Their task was to tell which face of the pair was either more Asian or more Caucasian. Although we found that Asian face pairs were unexpectedly but consistently better discriminated than Caucasian faces, this Asian advantage did not differ between the two city groups. Our results furthermore show that Seoul and Tübingen participants’ discrimination performance was similar for Asian and Caucasian faces.
These findings suggest that when there is no memory component involved in the task and when face appearance only differs in race-specifying information, own-race expertise does not result in better performance for own-race faces.}, web_url = {http://pec.sagepub.com/content/39/1_suppl.toc}, event_name = {33rd European Conference on Visual Perception}, event_place = {Lausanne, Switzerland}, state = {published}, DOI = {10.1177/03010066100390S101}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Lee RK{ryokyung}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ Schultz2010, title = {On the role of attention and eye movements for the perception of animacy from a single moving object}, journal = {Perception}, year = {2010}, month = {8}, volume = {39}, number = {ECVP Abstract Supplement}, pages = {19}, abstract = {We previously developed stimuli allowing parametric control over the percept of animacy evoked by the movements of a single object, without contribution from spatial arrangement, shape or structure of the object (Schultz and Dopjans, 2008 Perception 35 ECVP Supplement, 154). As observers tend to follow the stimulus with their eyes while performing animacy judgments, we quantified these eye movements in the present study (Experiment 1). In Experiment 2, we tested the importance of eye movements and attention for task performance by forcing subjects to fixate while judging animacy. In Experiment 3, attentional resources were further reduced by asking subjects to perform a secondary task at fixation while judging animacy. Experiment 1 showed that the distance between eye fixations and the stimulus increased with changes in animacy, compatible with a greater difficulty in following animate-looking stimuli. Combined results across experiments show that the strength of the changes in percept tends to be reduced with fixed gaze and is significantly decreased in the dual-task setting. In the latter, the greatest disruption in stimulus processing appears to result from detecting and reporting the fixation targets rather than just splitting attentional resources. These results suggest that at least partially sustained attention is required for animacy judgments about our single moving dot stimulus.}, web_url = {http://pec.sagepub.com/content/39/1_suppl.toc}, event_name = {33rd European Conference on Visual Perception}, event_place = {Lausanne, Switzerland}, state = {published}, DOI = {10.1177/03010066100390S101}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}} } @Poster{ 6751, title = {Whole-brain fMRI using repetition suppression between action and perception reveals cortical areas with mirror neuron properties}, journal = {Perception}, year = {2010}, month = {8}, volume = {39}, number = {ECVP Abstract Supplement}, pages = {54}, abstract = {Mirror Neurons (MN) have been suggested to be the supporting neural mechanism for action recognition and understanding. However, there is a current debate about the localization of MN in humans. Functional magnetic resonance imaging (fMRI) studies using repetition suppression (RS) paradigms for the identification of MN provide mixed results. Studies supporting the existence of MN restricted their analysis to a priori candidate regions, whereas studies that failed to find evidence used non-object-directed actions.
In the present fMRI study, we tackled these limitations by using object-directed actions in an RS paradigm and performing a whole-brain analysis. Subjects observed and executed simple grasping movements differing only in their goal-directedness (grasping a button vs. grasping beside it). MN areas should be (1) more activated by goal-directed actions and (2) exhibit RS between execution and observation of the same action. The analysis revealed three significant cortical clusters in the right anterior intraparietal sulcus (aIPS), right primary somatosensory cortex and left premotor cortex that show these characteristics. While the aIPS has been reported before as a possible region for MN, the other two clusters have not yet been directly associated with MN using RS paradigms. We discuss the potential contribution of these regions to object-directed actions.}, web_url = {http://pec.sagepub.com/content/39/1_suppl.toc}, event_name = {33rd European Conference on Visual Perception}, event_place = {Lausanne, Switzerland}, state = {published}, DOI = {10.1177/03010066100390S101}, author = {Schillinger F{frieder}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}; Uludag K} } @Conference{ 6740, title = {Cognitive categories of emotional and conversational facial expressions are influenced by dynamic information}, journal = {Perception}, year = {2010}, month = {8}, volume = {39}, number = {ECVP Abstract Supplement}, pages = {157}, abstract = {Most research on facial expressions focuses on static, ‘emotional’ expressions. Facial expressions, however, are also important in interpersonal communication (‘conversational’ expressions). In addition, communication is a highly dynamic phenomenon and previous evidence suggests that dynamic presentation of stimuli facilitates recognition. Hence, we examined the categorization of emotional and conversational expressions using both static and dynamic stimuli. In a between-subject design, 40 participants were asked to group 55 different facial expressions (either static or dynamic) of ten actors in a free categorization task. Expressions were to be grouped according to their overall similarity. The resulting confusion matrix was used to determine the consistency with which facial expressions were categorized. In the static condition, emotional expressions were grouped as separate categories while participants confused conversational expressions. In the dynamic condition, participants uniquely categorized basic and sub-ordinate emotional, as well as several conversational facial expressions. Furthermore, a multidimensional scaling analysis suggests that the same potency and valence dimensions underlie the categorization of both static and dynamic expressions. Basic emotional expressions represent the most effective categories when only static information is available.
Importantly, however, our results show that dynamic information allows for a much more fine-grained categorization and is essential in disentangling conversational expressions.}, web_url = {http://pec.sagepub.com/content/39/1_suppl.toc}, event_name = {33rd European Conference on Visual Perception}, event_place = {Lausanne, Switzerland}, state = {published}, DOI = {10.1177/03010066100390S101}, author = {Kaulard K{kascot}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ 6726, title = {Faces are represented relative to race-specific norms}, journal = {Perception}, year = {2010}, month = {8}, volume = {39}, number = {ECVP Abstract Supplement}, pages = {155}, abstract = {Recent models of face perception often adopt a framework in which faces are represented as points in a multidimensional space, relative to the average face that serves as a norm. Faces share many visual properties and could be encoded in one face space against one single norm. However, certain face properties may result in grouping of similar faces. How faces might be ‘subclassified’ in face space thus remains to be determined. We studied the processing of faces of different races, using high-level aftereffects, where exposure to one face systematically distorts the perception of a subsequently viewed face towards the ‘opposite’ identity in face space. We measured identity aftereffects for adapt-test pairs that were opposite race-specific (Asian and Caucasian) averages and pairs that were opposite a ‘generic’ average (both races morphed together). Aftereffects were larger for race-specific than for generic anti-faces. Since adapt-test pairs that lie opposite each other in face space generate larger aftereffects than non-opposite test pairs, these results suggest that Asian and Caucasian faces are coded using race-specific norms. Moreover, identification (at low identity strength) of the target faces was easier around the race-specific norms than around the generic norm, indicating that norms also have a functional role in face processing.}, web_url = {http://pec.sagepub.com/content/39/1_suppl.toc}, event_name = {33rd European Conference on Visual Perception}, event_place = {Lausanne, Switzerland}, state = {published}, DOI = {10.1177/03010066100390S101}, author = {Armann RGM{armann}{Department Human Perception, Cognition and Action}; Jeffery L; Calder AJ; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Rhodes G} } @Conference{ Bulthoff2010_2, title = {Interplay between identity and sex recognition in familiar faces}, year = {2010}, month = {6}, day = {18}, web_url = {http://www.zkw.uni-bremen.de/veranstaltungen.php?arc=1&lang=de}, event_name = {Universität Bremen: Zentrum für Kognitionswissenschaften (ZKW)}, event_place = {Bremen, Germany}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ 6818, title = {Categorical Representation of a Human/Monkey Face Continuum in the Human and Non-Human Primate Temporal Lobe}, year = {2010}, month = {6}, volume = {2010}, pages = {93}, abstract = {Categorization of faces is fundamental for social interactions of primates. To understand its neural basis, we investigate how human and monkey face categories are represented in both the human and non-human primate brain.
As stimuli, we use realistic three-dimensional morphed faces that linearly span the continuum between humans and monkeys (Fig. 1A). Extensive behavioral tests in both species revealed categorical perception with a shift of the categorical boundary towards the own species (Fig. 1B). This suggests that both species perceive the same stimulus continuum in a fundamentally different way. During a fixation task, we recorded extracellular signals from the temporal lobe in monkeys and BOLD signals in humans. To analyze the data, we used a multivariate pattern classifier approach based on Support Vector Machines and correlations. Consistent with the psychophysical results, we found an "own-species" bias in the categorical representation of human and monkey faces at the level of single neurons as well as in the population response in the inferior temporal lobe of the monkey (Fig. 1C). Symmetrically, we found a categorical response with an own-species bias in the activation patterns of the left human STS. In addition, human ventral temporal cortex showed a higher sensitivity for human faces. Our results are the first to demonstrate the neural basis of categorical representation of a facial attribute in the primate brain. In addition, our data show that both psychophysical and neuronal data can show categorical boundary shifts indicative of the behavioral relevance of prototypical categories.}, web_url = {http://www.areadne.org/2010/home.html}, editor = {Hatsopoulos, N. G., S. Pezaris}, event_name = {AREADNE 2010: Research in Encoding And Decoding of Neural Ensembles}, event_place = {Santorini, Greece}, state = {published}, author = {Sigala R{sigala}{Department Physiology of Cognitive Processes}; Schultz J{johannes}{Department Human Perception, Cognition and Action}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Rainer G{gregor}{Department Physiology of Cognitive Processes}} } @Poster{ SchultzB2010, title = {How does the brain identify living things based on their motion?}, year = {2010}, month = {6}, volume = {16}, number = {194 MT-PM}, pages = {62}, web_url = {http://www.humanbrainmapping.org/i4a/pages/index.cfm?pageid=1}, event_name = {16th Annual Meeting of the Organisation for Human Brain Mapping (HBM 2010)}, event_place = {Barcelona, Spain}, state = {published}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ Bulthoff2010, title = {Die Wechselwirkung von Identität und Geschlecht bei der Gesichtswahrnehmung}, year = {2010}, month = {6}, pages = {16}, abstract = {We investigated the interaction of identity-specific and sex-specific information in face perception. In Experiment 1, participants were asked to pick out the original version of a familiar face from among a number of modified versions of that face. This task was easier when the original face was presented together with identity-modified rather than sex-modified versions of the original face, suggesting that sex-specific information is not stored accurately in memory. In Experiment 2, we modified the sex of a series of faces by transforming female faces into male faces. Participants had greater difficulty classifying these sex-modified faces as male when the original face was familiar to them. In contrast to the classical model of face processing by Bruce & Young (1986), our data indicate that sex-specific information in faces is not processed independently of identity-specific information. In summary, our visual system does not necessarily appear to be designed to store perfect models even of familiar faces, particularly not with respect to sex. Presumably, accurate sex information is not necessary, especially since it consists of only the two classes male and female, so that identity-specific information dominates sex-specific information even when this is not expedient.}, event_name = {36. Tagung Psychologie und Gehirn (PuG 2010)}, event_place = {Greifswald, Germany}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Inproceedings{ PastraWSVK2010, title = {The POETICON Corpus: Capturing Language Use and Sensorimotor Experience in Everyday Interaction}, year = {2010}, month = {5}, pages = {3031-3036}, abstract = {Natural language use, acquisition, and understanding usually takes place in multisensory and multimedia communication environments. Therefore, to model language in its interaction and integration with sensorimotor experiences, one needs a representative corpus of such interplay. In this paper, we present the first corpus of language use and sensorimotor experience recordings in everyday human:human interaction, in which spontaneous language communication has been recorded along with corresponding multiview video recordings, recordings of 3D full body kinematics, and 3D tracking of objects in focus. It is a twelve-hour corpus which comprises six everyday human:human interaction scenes, each one performed 3 times by 4 different English-speaking couples (interaction between a male and a female actor), each couple acting each scene in two settings: a fully naturalistic setting in which 5-camera multi-view video recordings take place, and a high-tech setting, with full body motion capture for both individuals, a 2-camera multiview video recording, and 3D tracking of focus objects. The corpus has been developed within an EU-funded cognitive systems research project, POETICON (http://www.poeticon.eu), and represents a new type of language resource for cognitive systems. Namely, a corpus that reveals the dynamic role of language in its interplay with sensorimotor experiences and which allows one to computationally model this interplay.}, file_url = {fileadmin/user_upload/files/publications/LREC-2010-Pastra.pdf}, web_url = {http://www.lrec-conf.org/lrec2010/}, web_url2 = {https://www.semanticscholar.org/paper/The-POETICON-Corpus-Capturing-Language-Use-and-Sen-Pastra-Wallraven/6108ff286a7eea4345ef2562a8d3b63d3bb38400}, editor = {Calzolari, N., K. Choukri, B. Maegaard, J. Mariani, J. Odijk, S. Piperidis, M. Rosner, D.
Tapias}, publisher = {ELRA}, address = {Paris, France}, event_name = {Seventh International Conference on Language Resources and Evaluation (LREC 2010)}, event_place = {Valletta, Malta}, state = {published}, ISBN = {2-9517408-6-7}, author = {Pastra K; Wallraven C{walli}{Department Human Perception, Cognition and Action}; Schultze M{mschultze}{Department Human Perception, Cognition and Action}; Vatakis A; Kaulard K{kascot}{Department Human Perception, Cognition and Action}} } @Poster{ 6739, title = {Laying the foundations for an in-depth investigation of the whole space of facial expressions}, journal = {Journal of Vision}, year = {2010}, month = {5}, volume = {10}, number = {7}, pages = {606}, abstract = {Facial expressions form one of the most important and powerful communication systems of human social interaction. They express a large range of emotions but also convey more general, communicative signals. To date, research has mostly focused on the static, emotional aspect of facial expression processing, using only a limited set of “generic” or “universal” expression photographs, such as a happy or sad face. That facial expressions carry communicative aspects beyond emotion and that they transport meaning in the temporal domain, however, have so far been largely neglected. In order to enable a deeper understanding of facial expression processing with a focus on both emotional and communicative aspects of facial expressions in a dynamic context, it is essential to first construct a database that contains such material using a well-controlled setup. We here present the novel MPI facial expression database, which contains 20 native German participants performing 58 expressions based on pre-defined context scenarios, making it the most extensive database of its kind to date. Three experiments were performed to investigate the validity of the scenarios and the recognizability of the expressions. In Experiment 1, 10 participants were asked to freely name the facial expressions that would be elicited given the scenarios. The scenarios were effective: 82% of the answers matched the intended expressions. In Experiment 2, 10 participants had to identify 55 expression videos of 10 actors. We found that 34 expressions could be identified reliably without any context. Finally, in Experiment 3, 20 participants had to group the 55 expression videos of 10 actors based on similarity. Out of the 55 expressions, 45 formed consistent groups, which highlights the impressive variety of conversational expression categories we use.
Interestingly, none of the experiments found any advantage for the universal expressions, demonstrating the robustness with which we interpret conversational facial expressions.}, web_url = {http://www.journalofvision.org/content/10/7/606}, event_name = {10th Annual Meeting of the Vision Sciences Society (VSS 2010)}, event_place = {Naples, FL, USA}, state = {published}, DOI = {10.1167/10.7.606}, author = {Kaulard K{kascot}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; Cunningham DW{dwc}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 6728, title = {Race-specific norms for coding face identity and a functional role for norms}, journal = {Journal of Vision}, year = {2010}, month = {5}, volume = {10}, number = {7}, pages = {706}, abstract = {High-level perceptual aftereffects have revealed that faces are coded relative to norms that are dynamically updated by experience. The nature of these norms and the advantage of such a norm-based representation, however, are not yet fully understood. Here, we used adaptation techniques to get insight into the perception of faces of different race categories. We measured identity aftereffects for adapt-test pairs that were opposite a race-specific average and pairs that were opposite a ‘generic’ average, made by morphing together Asian and Caucasian faces. Aftereffects were larger following exposure to anti-faces that were created relative to the race-specific (Asian and Caucasian) averages than to anti-faces created using the mixed-race average. Since adapt-test pairs that lie opposite to each other in face space generate larger identity aftereffects than non-opposite test pairs, these results suggest that Asian and Caucasian faces are coded using race-specific norms. We also found that identification thresholds were lower when targets were distributed around the race-specific norms than around the mixed-race norm, which is also consistent with a functional role for race-specific norms.}, web_url = {http://www.journalofvision.org/content/10/7/706.abstract}, event_name = {10th Annual Meeting of the Vision Sciences Society (VSS 2010)}, event_place = {Naples, FL, USA}, state = {published}, DOI = {10.1167/10.7.706}, author = {Armann RGM{armann}{Department Human Perception, Cognition and Action}; Jeffery L; Calder A; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Rhodes G} } @Inproceedings{ 6292, title = {Perceptual representations of parametrically-defined and natural objects comparing vision and haptics}, year = {2010}, month = {4}, pages = {35-42}, abstract = {Studies concerning how the brain might represent objects by means of a perceptual space have primarily focused on the visual domain. Here we want to show that the haptic modality can equally well recover the underlying structure of a physical object space, forming a perceptual space that is highly congruent to the visual perceptual space. By varying three shape parameters a physical shape space of shell-like objects was generated. Sighted participants explored pictures of the objects while blindfolded participants haptically explored 3D printouts of the objects. Similarity ratings were performed and analyzed using multidimensional scaling (MDS) techniques. Visual and haptic similarity ratings highly correlated and resulted in very similar visual and haptic MDS maps. 
To investigate to what degree these results are transferable to natural objects, we performed the same visual and haptic similarity ratings and multidimensional scaling analyses using a set of natural sea shells. Again, we found very similar perceptual spaces in the haptic and visual domain. Our results suggest that the haptic modality is capable of surprisingly acute processing of complex shape.}, file_url = {/fileadmin/user_upload/files/publications/Haptics2010-Gaissert_6292[0].pdf}, web_url = {http://www.hapticssymposium.org/next_conference.html}, editor = {Colgate, J. E., S. Lederman, D. Prattichizzo}, publisher = {IEEE}, address = {Piscataway, NJ, USA}, event_name = {IEEE Haptics Symposium 2010}, event_place = {Waltham, MA, USA}, state = {published}, ISBN = {978-1-424-46821-8}, DOI = {10.1109/HAPTIC.2010.5444683}, author = {Gaissert N{ninagaissert}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}} } @Article{ 6402, title = {Brain Imaging: Decoding Your Memories}, journal = {Current Biology}, year = {2010}, month = {3}, volume = {20}, number = {6}, pages = {R269-R271}, abstract = {Recent advances in neuroimaging allow mental states to be inferred from non-invasive data. In a new study, memories of complex events were successfully decoded solely from imaged activation in a memory-related brain structure.}, web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6VRT-4YNKHPX-D-4&_cdi=6243&_user=29041&_pii=S0960982210001314&_orig=search&_coverDate=03%2F23%2F2010&_sk=999799993&view=c&wchp=dGLzVtz-zSkWz&md5=91beb337f50434cb758048e520c0abb5&ie=/sdarticle.pdf}, state = {published}, DOI = {10.1016/j.cub.2010.02.001}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 6246, title = {Eye and Pointer Coordination in Search and Selection Tasks}, year = {2010}, month = {3}, pages = {89-92}, abstract = {Selecting a graphical item by pointing with a computer mouse is a ubiquitous task in many graphical user interfaces. Several techniques have been suggested to facilitate this task, for instance, by reducing the required movement distance. Here we measure the natural coordination of eye and mouse pointer control across several search and selection tasks. We find that users automatically minimize the distance to likely targets in an intelligent, task-dependent way. When target location is highly predictable, top-down knowledge can enable users to initiate pointer movements prior to target fixation. These findings question the utility of existing assistive pointing techniques and suggest that alternative approaches might be more effective.}, file_url = {/fileadmin/user_upload/files/publications/ETRA2010-Bieg_6246[0].pdf}, web_url = {http://etra.cs.uta.fi/}, editor = {Morimoto, C. H., H. Istance, A. Hyrskykari, Q.
Ji}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {Symposium on Eye Tracking Research and Applications (ETRA 2010)}, event_place = {Austin, TX, USA}, state = {published}, ISBN = {978-1-60558-994-7}, DOI = {10.1145/1743666.1743688}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Fleming RW{roland}{Department Human Perception, Cognition and Action}; Reiterer H; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 6293, title = {Visual and Haptic Perceptual Spaces From Parametrically-Defined to Natural Objects}, year = {2010}, month = {3}, pages = {2-7}, abstract = {In this study we show that humans form very similar perceptual spaces when they explore parametrically-defined shell-shaped objects visually or haptically. A physical object space was generated by varying three shape parameters. Sighted participants explored pictures of these objects while blindfolded participants haptically explored 3D printouts of the objects. Similarity ratings were performed and analyzed using multidimensional scaling (MDS) techniques. Visual and haptic similarity ratings highly correlated and resulted in very similar visual and haptic MDS maps, providing evidence for one shared perceptual space underlying both modalities. To investigate to what degree these results are transferable to natural objects, we performed the same visual and haptic similarity ratings and multidimensional scaling analyses using a set of natural sea shells.}, file_url = {/fileadmin/user_upload/files/publications/AAAI-2010-Gaissert_6293[0].pdf}, web_url = {https://www.aaai.org/ocs/index.php/SSS/SSS10/paper/viewFile/1082/1376}, editor = {Barkowsky, T. , S. Bertel, C. Hölscher, T. F. Shipley}, publisher = {AAAI Press}, address = {Menlo Park, CA, USA}, event_name = {AAAI 2010 Spring Symposium on Cognitive Shape Processing}, event_place = {Stanford, CA, USA}, state = {published}, author = {Gaissert N{ninagaissert}{Department Human Perception, Cognition and Action}; Ulrichs K{ulrichs}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}} } @Inbook{ Bulthoff2010_4, title = {Recognition}, year = {2010}, pages = {863-864}, abstract = {In a broad sense, the term recognition refers to the explicit feeling of familiarity that occurs when, for example, we view an object or hear a voice that we have experienced previously. It has been widely investigated in the visual domain, and this entry is thus based mainly on this field of research. By definition, accurate recognition can only occur for objects or sets of objects that we have experienced (seen) in the past.
Recognition is fundamental to interpreting perceptual experiences, as it gives explicit meaning to our visual input.}, web_url = {https://ia601806.us.archive.org/25/items/Encyclopedia_of_Perception_Volume_1_and_2/Encyclopedia_of_Perception_Volume_1_and_2.pdf}, editor = {Goldstein, E.B.}, publisher = {Sage}, address = {Los Angeles, CA, USA}, booktitle = {Encyclopedia of Perception}, state = {published}, ISBN = {978-1-4129-4081-8}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ AzulayDA2009, title = {Patterns of activity during haptic face recognition: an fMRI study}, journal = {Journal of Molecular Neuroscience}, year = {2009}, month = {11}, day = {22}, volume = {39}, number = {Supplement 1}, pages = {S11}, abstract = {Neuroimaging research on face processing has focused mainly on the visual modality, providing evidence for specific processing areas involved in perceptual expertise. Growing evidence suggests that humans can also learn and recognize faces by touch alone. However, little is known about the neural correlates underlying such haptic face recognition. In order to investigate this, we scanned 9 healthy subjects using 3T fMRI while they performed a previously described face encoding-retrieval task. Briefly, in an old/new recognition task, participants were haptically introduced to three facemasks (small-scale 3D resin human face masks) while inside the scanner. This encoding phase was immediately followed by a retrieval phase in which participants were presented with the three previously introduced masks and three other similar comparison facemasks consecutively in pseudorandom order. Subjects were instructed to determine whether or not the mask had been introduced in the encoding phase. In the poster, we cover the neural BOLD correlates of face encoding and face retrieval.}, web_url = {http://link.springer.com/content/pdf/10.1007%2Fs12031-009-9309-1.pdf}, event_name = {18th Annual Meeting of the Israel Society for Neuroscience (ISFN 2009)}, event_place = {Eilat, Israel}, state = {published}, DOI = {10.1007/s12031-009-9309-1}, author = {Azulay H; Dopjans L{ldopjans}{Department Human Perception, Cognition and Action}; Amedi A} } @Conference{ ArmannR2009, title = {How does the brain code the race of faces?}, year = {2009}, month = {11}, volume = {9}, pages = {10}, abstract = {High-level perceptual aftereffects have revealed that faces are coded relative to norms, supposedly via some sort of opponent coding mechanism that is dynamically updated by experience. The nature of these norms and the advantage of such a norm-based representation, however, are not yet fully understood. Here, we used high-level adaptation techniques to get insight into the perception of faces of different race categories. We compared the size of identity aftereffects (AEs) for pairs of adapt and test faces that were taken from different morph trajectories, based on potential norms for Asian and Caucasian faces. Larger aftereffects were found following exposure to anti-faces created relative to averages of the race of the target identities than to anti-faces created using a generic average. Since adapt-test pairs lying opposite to each other in face space generate larger identity AEs than non-opposite test pairs, this suggests that Asian and Caucasian faces are coded using race-specific norms, rather than a generic one.
Moreover, we find that identification performance is better for face morphs that are created using these race-specific norms, independent of their actual identity strength. To our knowledge, this is the first evidence for a functional benefit of norms in face recognition.}, web_url = {http://www.neuroschool-tuebingen-nena.de/}, event_name = {10th Conference of Junior Neuroscientists of Tübingen (NeNa 2009)}, event_place = {Ellwangen, Germany}, state = {published}, author = {Armann R{armann}{Department Human Perception, Cognition and Action}; Rhodes G} } @Conference{ KaulardWCB2009, title = {Laying the foundations for an in-depth investigation of the whole space of facial expressions}, year = {2009}, month = {11}, volume = {10}, pages = {11}, abstract = {Compared to other species, humans have developed highly sophisticated communication systems for social interaction. One of the most important communication systems is based on facial expressions, which are used both for expressing emotions and for conveying intentions. Starting already at birth, humans are trained to process faces and facial expressions, resulting in a high degree of perceptual expertise for face perception and social communication. To date, research has mostly focused on the emotional aspect of facial expression processing, using only a very limited set of “generic” or “universal” expressions, such as happiness or sadness. The important communicative aspect of facial expressions, however, has so far been largely neglected. Furthermore, the processing of facial expressions is influenced by dynamic information (e.g. Fox et al., 2009). However, almost all studies so far have used static expressions and have thus studied facial expressions in an ecologically less valid context (O’Toole et al., 2004). In order to enable a deeper understanding of facial expression processing, it therefore seems crucial to investigate the emotional and communicative aspects of facial expressions in a dynamic context. For these investigations it is essential to first construct a database that contains such material using a well-controlled setup. In this talk, we will present the novel MPI facial expression database, which to our knowledge is the most extensive database of its kind to date. Furthermore, we will briefly present psychophysical experiments with which we investigated the validity of our database, as well as the recognizability of a large set of facial expressions.}, web_url = {http://www.neuroschool-tuebingen-nena.de/}, event_name = {10th Conference of Junior Neuroscientists of Tübingen (NeNa 2009)}, event_place = {Ellwangen, Germany}, state = {published}, author = {Kaulard K{kascot}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; Cunningham DW{dwc}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ 5824, title = {Cross-Modal Transfer in Visual and Haptic Face Recognition}, journal = {IEEE Transactions on Haptics}, year = {2009}, month = {10}, volume = {2}, number = {4}, pages = {236-240}, abstract = {We report four psychophysical experiments investigating cross-modal transfer in visual and haptic face recognition. We found surprisingly good haptic performance and cross-modal transfer for both modalities. Interestingly, transfer was asymmetric depending on which modality was learned first.
These findings are discussed in relation to haptic object processing and face processing.}, web_url = {http://www2.computer.org/portal/web/csdl/doi/10.1109/TOH.2009.18}, state = {published}, DOI = {10.1109/TOH.2009.18}, author = {Dopjans L{ldopjans}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 6291, title = {How does the brain identify living things based on their motion?}, year = {2009}, month = {10}, volume = {39}, number = {380.15}, abstract = {Animals (including humans) have to identify living moving things in the environment: these could be prey, enemies, or mates, and interactions with them should be actively controlled. Living things could be detected visually through their shape or their motion, or both. When shape is hard to see (fog, twilight, great distance, small animal), motion becomes an important cue. Biological motion has been studied widely using point-light displays, but these displays appear to contain some sort of shape or form information that influences recognition. To study the neural correlates of the detection of living entities from motion alone, we developed a stimulus consisting of a single moving dot, thus eliminating all possible sources of information about form, spatial arrangement, shape or structure of the object. Our single dot moved such that it appeared either self-propelled (modelled on the movements of a fly) or moved by an external force (modelled on a leaf drifting in the wind). Both types of movement were built using the same equation but differed in speed and acceleration profiles according to a small set of parameters. Low-level characteristics of the stimuli (range of positions on the screen, average speed, overall aspect of the trajectory) were kept as constant as possible. The parameters could be varied in a continuous fashion to create morphs between the self-propelled and externally-moved extremes. Consistent with expectations, behavioral experiments showed that self-propelled stimuli were perceived as more animate (= more likely to be alive) than the externally-moved stimuli, with a gradual transition occurring in the intermediary morphs. The extreme stimuli and four intermediary morphs were presented in an fMRI experiment to participants who had to categorize the stimuli into alive and non-alive. Using separate functional localizers, we located areas hMT+/V5 and the superior temporal sulcus region responding to point-light walkers, and found that neither region showed changes in BOLD response following the changes in percept. However, BOLD response in a region of the left posterior superior parietal cortex scaled with the degree of perceived animacy.
This suggests that the STS is not simply a detector of all kinds of animate motion, but might only be implicated when some sort of shape information in the stimuli (as with point-light displays or with interacting dots) is contributing to the percept of animacy.}, web_url = {http://www.abstractsonline.com/Plan/ViewAbstract.aspx?sKey=f5dec3fb-2bb6-482d-8553-db756136f1a1&cKey=e2a39a05-441a-496b-8048-e8c4ae6e6a46}, event_name = {39th Annual Meeting of the Society for Neuroscience (Neuroscience 2009)}, event_place = {Chicago, IL, USA}, state = {published}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 6101, title = {Gaze-Assisted Pointing for Wall-Sized Displays}, year = {2009}, month = {8}, pages = {9-12}, abstract = {Previous studies have argued for the use of gaze-assisted pointing techniques (MAGIC) in improving human-computer interaction. Here, we present experimental findings that were drawn from human performance of two tasks on a wall-sized display. Our results show that a crude adoption of MAGIC across a range of complex tasks does not increase pointing performance. More importantly, a detailed analysis of user behavior revealed several issues that were previously ignored (such as interference of corrective saccades, increased decision time due to variability of precision, errors due to eye-hand asynchrony, and interference with search behavior) which should influence the development of gaze-assisted technology.}, web_url = {http://www.interact2009.org/}, editor = {Gross, T., J. Gulliksen, P. Kotze, L. Oestreicher, P. Palanque, R. Oliveira Prates, M. Winckler}, publisher = {Springer}, address = {Berlin, Germany}, series = {Lecture Notes in Computer Science ; 5727}, booktitle = {Human-Computer Interaction - INTERACT 2009}, event_name = {12th IFIP TC13 International Conference on Human-Computer Interaction}, event_place = {Uppsala, Sweden}, state = {published}, ISBN = {978-3-642-03658-3}, DOI = {10.1007/978-3-642-03658-3_3}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Reiterer H} } @Poster{ SchultzL2009, title = {BOLD signal in intraparietal sulcus covaries with magnitude of implicitly driven attention shifts}, journal = {Perception}, year = {2009}, month = {8}, volume = {38}, number = {ECVP Abstract Supplement}, pages = {137}, abstract = {A lot is known about the neural basis of directing attention based on explicit cues. In real life however, attention shifts are rarely directed by explicit cues but rather generated implicitly, for example on the basis of previous experience. Here, we aimed at studying attention shifts dependent on recent trial history. We asked observers to detect targets in a stream of visual stimuli with three feature dimensions: colour, shape and motion. Critically, target occurrence probability was always higher in one stimulus dimension than in the others, and probabilities switched between dimensions over blocks of trials. After each probability switch, target detection times decreased exponentially for high-probability targets and increased for low-probability targets, compatible with gradual shifts in attention dependent on trial history since the switch.
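The exponential recovery of detection times after each probability switch can be quantified by fitting a decaying exponential to per-trial detection times. A minimal sketch in Python, using hypothetical data and an illustrative model form rather than the authors' actual analysis:

```python
# Sketch: fit an exponential decay of target detection times after a
# probability switch (hypothetical data; illustrative model only).
import numpy as np
from scipy.optimize import curve_fit

def rt_model(trial, asymptote, gain, tau):
    """Detection time decaying exponentially toward an asymptote."""
    return asymptote + gain * np.exp(-trial / tau)

trials = np.arange(1, 31)                      # trials since the switch
rng = np.random.default_rng(0)
rts = rt_model(trials, 450.0, 120.0, 5.0) + rng.normal(0.0, 15.0, trials.size)

(asymptote, gain, tau), _ = curve_fit(rt_model, trials, rts, p0=(400.0, 100.0, 3.0))
print(f"asymptote = {asymptote:.0f} ms, gain = {gain:.0f} ms, tau = {tau:.1f} trials")
```

The fitted time constant tau would then index how quickly attention is redistributed after the switch.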
BOLD signal in left prefrontal and intraparietal sulcus regions was higher in the early phase after the switch, while anterior cingulate, cuneus, precuneus, temporal and more anterior frontal regions showed more activation later after the switch. These findings are compatible with the expected engagement of regions involved in the establishment and maintenance of attentional sets. BOLD signal in left intraparietal sulcus correlated with the size of the performance changes consecutive to the detected targets, suggesting that it reflects the size of attention shifts induced by updating target probabilities over recent trial history.}, web_url = {http://pec.sagepub.com/content/38/1_suppl.toc}, event_name = {32nd European Conference on Visual Perception}, event_place = {Regensburg, Germany}, state = {published}, DOI = {10.1177/03010066090380S101}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; Lennert T{lennert}{Department Human Perception, Cognition and Action}} } @Poster{ 5954, title = {Going beyond universal expressions: investigating the visual perception of dynamic facial expressions}, journal = {Perception}, year = {2009}, month = {8}, volume = {38}, number = {ECVP Abstract Supplement}, pages = {83}, abstract = {Investigations of facial expressions have focused almost exclusively on the six so-called universal expressions. During everyday interaction, however, a much larger set of facial expressions is used for communication. To examine this mostly unexplored space, we developed a large video database for emotional and conversational expressions: native German participants performed 58 expressions based on pre-defined context scenarios. Three experiments were performed to investigate the validity of the scenarios and the recognizability of the expressions. In Experiment 1, ten participants were asked to freely name the facial expressions that would be elicited given the scenarios. The scenarios were effective: 82% of the answers matched the intended expressions. In Experiment 2, ten participants had to identify 55 expression videos of ten actors, presented successively. We found that 20 expressions could be identified reliably without any context. Finally, in Experiment 3, twenty participants had to group the 55 expression videos based on similarity while allowing for repeated comparisons. Out of the 55 expressions, 45 formed consistent groups, showing that visual comparison facilitates the recognition of conversational expressions. Interestingly, none of the experiments found any advantage for the universal expressions, demonstrating the robustness with which we interpret conversational facial expressions.}, web_url = {http://pec.sagepub.com/content/38/1_suppl.toc}, event_name = {32nd European Conference on Visual Perception}, event_place = {Regensburg, Germany}, state = {published}, DOI = {10.1177/03010066090380S101}, author = {Kaulard K{kascot}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; Cunningham DW{dwc}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 5867, title = {Head mobility influences gaze behavior across natural viewing tasks}, journal = {Perception}, year = {2009}, month = {8}, volume = {38}, number = {ECVP Abstract Supplement}, pages = {166}, abstract = {Natural gaze behavior is often studied under conditions that restrain head movements.
Here, we report how the availability of head movement can influence gaze behavior on wall-sized images of natural outdoor scenes (field-of-view: ~90°). Participants performed half of the experiment with complete head mobility and the remaining trials with their heads restrained in a chin-rest. They were required to either rate the images for attractiveness (i.e., free-viewing) or to count the visible animals (i.e., visual search). On average, more fixations were found on the trials that allowed for head movements (unrestrained: 4.21 fixations/sec; restrained: 3.75 fixations/sec), which were also shorter in their mean duration (unrestrained: 221 ms; restrained: 252 ms). In addition, unrestrained gaze contained a larger proportion of small amplitude saccades (i.e., less than 5°) than head-restrained gaze. Finally, our participants demonstrated a general preference for fixating regions that were close to the central eye-in-head orientation. Altogether, these findings suggest that the availability of head movements allowed our participants to re-orient to regions of interest and sample these regions more frequently. This sampling benefit applied to both visual search and free viewing tasks. The current findings emphasize the importance of allowing head mobility when studying natural gaze behavior.}, web_url = {http://pec.sagepub.com/content/38/1_suppl.toc}, event_name = {32nd European Conference on Visual Perception}, event_place = {Regensburg, Germany}, state = {published}, DOI = {10.1177/03010066090380S101}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Herholz S{sherholz}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Fleming R{roland}{Department Human Perception, Cognition and Action}} } @Poster{ 6082, title = {Influences of task complexity and individual differences on the performance of gaze-assisted human-machine interfaces}, journal = {Perception}, year = {2009}, month = {8}, volume = {38}, number = {ECVP Abstract Supplement}, pages = {172}, abstract = {Human-machine interfaces can be enhanced by incorporating knowledge of the user's current point of regard. For example, Zhai and colleagues (1999) showed that faster task completion times could be achieved on a simple pointing task if the display pointer was translocated according to the user's gaze. This manipulation removes the need to manually move the pointer and hence promises time-savings that grow in proportion to display size. Here, we report the findings of applying the same technique on a wall-sized display (2.2 m × 1.8 m), across more complex pointing tasks. Two main components comprised the four tasks that participants were required to perform, with and without gaze-assisted pointing: namely, conjunctive search of colored shapes and click-and-drag of items to a circumscribed region. Contrary to previous findings, we found that gaze-assisted pointer placement significantly increased task completion times, relative to manual pointer placement.
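The MAGIC-style technique evaluated here reduces manual travel by warping the pointer toward gaze before the hand takes over fine positioning. A minimal sketch of one such warping rule; the dead-zone threshold, function name, and coordinate conventions are illustrative assumptions, not the published implementation:

```python
# Sketch of a "liberal" MAGIC-style pointing rule: the pointer jumps to
# the gaze position once gaze has moved sufficiently far away, and fine
# positioning stays manual. All parameters are illustrative assumptions.
import math

WARP_THRESHOLD_DEG = 3.0  # assumed dead zone around the current pointer

def update_pointer(pointer, gaze, hand_delta):
    """Return a new (x, y) pointer position, in degrees of visual angle."""
    dist = math.hypot(gaze[0] - pointer[0], gaze[1] - pointer[1])
    if dist > WARP_THRESHOLD_DEG:
        pointer = gaze                       # coarse jump toward gaze
    return (pointer[0] + hand_delta[0],      # manual fine adjustment
            pointer[1] + hand_delta[1])

print(update_pointer((0.0, 0.0), (10.0, 5.0), (0.2, -0.1)))
```

A generous dead zone is one way to keep corrective saccades from triggering spurious warps, one of the interference issues noted above.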
Detailed analyses revealed that task complexity and individual differences in gaze behaviour and eye-hand coordination had an adverse effect on task performance, which emphasizes the importance of considering these factors in future implementations of gaze-assisted interfaces.}, web_url = {http://pec.sagepub.com/content/38/1_suppl.toc}, event_name = {32nd European Conference on Visual Perception}, event_place = {Regensburg, Germany}, state = {published}, DOI = {10.1177/03010066090380S101}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Reiterer H} } @Poster{ 5918, title = {Re-learning face recognition: evidence for efficient strategies without holistic processing}, journal = {Perception}, year = {2009}, month = {8}, volume = {38}, number = {ECVP Abstract Supplement}, pages = {100}, abstract = {In previous experiments, we showed an advantage for visual over haptic face recognition. Promoting serial encoding in vision using a novel gaze-restricted display, we found that these differences are due to modality-specific encoding strategies (holistic in vision vs serial in haptics), and that serial encoding leads to featural (vs holistic) processing. Here, we test how the observed encoding and processing differences might be affected by expertise. Participants were trained on five consecutive days on a set of 19 faces using an old/new recognition task for which three faces were learned with feedback, followed by four test-blocks. On days 1 (pre-test), 4 (post-test), and 5, the task consisted of two upright and two inverted test-blocks. On day 5, we tested participants on a different set of faces. Performance for upright faces was low on day 1, but improved significantly through training (d1'=1.11, d4'=3.75). Importantly, this learning effect generalized to a new face-set on day 5 (d5'=3.07). Although performance significantly improved through training, we found no inversion effect on any day (d1'=1.70, d4'=3.54), indicating no change in processing strategies. Our results show that participants can develop efficient, generalizing strategies to compensate for encoding differences, and that these strategies do not require holistic encoding.}, web_url = {http://pec.sagepub.com/content/38/1_suppl.toc}, event_name = {32nd European Conference on Visual Perception}, event_place = {Regensburg, Germany}, state = {published}, DOI = {10.1177/03010066090380S101}, author = {Dopjans L{ldopjans}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ Bulthoff2009, title = {Sex categorization is influenced by facial information about identity}, journal = {Perception}, year = {2009}, month = {8}, volume = {38}, number = {ECVP Abstract Supplement}, pages = {78}, abstract = {According to Bruce and Young's (1986 British Journal of Psychology 77 305 - 327) classic model of face recognition, sex-related information about a face is accessed independently of information about identity. Therefore familiarity with a face should not influence sex categorization. This issue of independence has remained controversial as it has been supported in some studies and questioned in others. Here we used faces that were presented in two conditions: sex-unchanged and sex-changed. Participants were very familiar with some of the identities.
For all participants, the unchanged familiar faces presented congruent identity and sex information while the sex-changed familiar faces presented incongruent identity and sex information. Participants performed a sex categorization task on all familiar and unfamiliar faces presented in the unchanged and sex-changed conditions. They were asked to ignore identity and base their responses solely on the sex appearance of the faces. Our results show that participants were slower and less accurate for sex-changed than for unchanged familiar faces, while those differences did not appear for unfamiliar faces. These results indicate that sex and identity are not independent as participants could not ignore identity information while doing a sex categorization task.}, web_url = {http://pec.sagepub.com/content/38/1_suppl.toc}, event_name = {32nd European Conference on Visual Perception}, event_place = {Regensburg, Germany}, state = {published}, DOI = {10.1177/03010066090380S101}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Conference{ 5953, title = {Exploring visual and haptic object categorization}, journal = {Perception}, year = {2009}, month = {8}, volume = {38}, number = {ECVP Abstract Supplement}, pages = {159}, abstract = {Humans combine visual and haptic shape information in object processing. To investigate commonalities and differences of these two modalities for object categorization, we performed similarity ratings and three different categorization tasks visually and haptically and compared them using multidimensional scaling techniques. As stimuli we used a 3-D object space of 21 complex, parametrically-defined shell-like objects. For haptic experiments, 3-D plastic models were freely explored by blindfolded participants with both hands. For visual experiments, 2-D images of the objects were used. In the first task, we gathered pair-wise similarity ratings for all objects. In the second, unsupervised task, participants freely categorized the objects. In the third, semi-supervised task, participants had to form exactly three groups. In the fourth, supervised task, participants learned three prototype objects and had to assign all other objects accordingly. For all tasks we found that within-category distances were smaller than across-category distances. Categories form clusters in perceptual space with increasing density from unsupervised to supervised categorization. In addition, the unconstrained similarity ratings predict the categorization behavior of the unsupervised categorization task best.
Importantly, we found no differences between the modalities in any task, showing that the processes underlying categorization are highly similar in vision and haptics.}, web_url = {http://pec.sagepub.com/content/38/1_suppl.toc}, event_name = {32nd European Conference on Visual Perception}, event_place = {Regensburg, Germany}, state = {published}, DOI = {10.1177/03010066090380S101}, author = {Gaissert N{ninagaissert}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ 4689, title = {Gaze behavior in face comparison: The roles of sex, task, and symmetry}, journal = {Attention, Perception and Psychophysics}, year = {2009}, month = {7}, volume = {71}, number = {5}, pages = {1107-1126}, abstract = {Knowing where people look on a face provides an objective insight into the information entering the visual system and into cognitive processes involved in face perception. In the present study, we recorded eye movements of human participants while they compared two faces presented simultaneously. Observers' viewing behavior and performance were examined in two tasks of parametrically varying difficulty, using two types of face stimuli (sex morphs and identity morphs). The frequency, duration, and temporal sequence of fixations on previously defined areas of interest in the faces were analyzed. As was expected, viewing behavior and performance varied with difficulty. Interestingly, observers compared predominantly the inner halves of the face stimuli—a result inconsistent with the general left-hemiface bias reported for single faces. Furthermore, fixation patterns and performance differed between tasks, independently of stimulus type. Moreover, we found differences in male and female participants' viewing behaviors, but only when the sex of the face stimuli was task relevant.}, web_url = {http://app.psychonomic-journals.org/content/71/5/1107.full.pdf+html}, state = {published}, DOI = {10.3758/APP.71.5.1107}, author = {Armann R{armann}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ 5943, title = {From unsupervised to supervised categorization in vision and haptics}, year = {2009}, month = {7}, volume = {10}, number = {679}, pages = {172-173}, abstract = {Categorization studies have primarily focused on the visual percept of objects. But in everyday life humans combine percepts from different modalities. To better understand this cue combination and to learn more about the mechanisms underlying categorization, we performed different categorization tasks visually and haptically and compared the two modalities. All experiments used the same set of complex, parametrically-defined, shell-like objects based on three shape parameters (see figure and [Gaissert, N., C. Wallraven and H. H. Bülthoff: Analyzing perceptual representations of complex, parametrically-defined shapes using MDS. Eurohaptics 2008, 265-274]). For the visual task, we used printed pictures of the objects, whereas for the haptic experiments, 3D plastic models were generated using a 3D printer and explored by blindfolded participants using both hands. Three different categorization tasks were performed in which all objects were presented to participants simultaneously. In an unsupervised task participants had to categorize the objects in as many groups as they liked to.
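Free-sorting data of the kind just described are commonly converted into pairwise similarity by counting how often two objects land in the same group; such a matrix can then feed the multidimensional scaling analyses these abstracts report. A minimal sketch with made-up groupings:

```python
# Sketch: turn free-categorization sorts into a pairwise co-occurrence
# similarity matrix (hypothetical groupings; illustrative only).
import numpy as np

# One sort per participant: object index -> group label (made up).
sorts = [
    [0, 0, 1, 1, 2, 2],
    [0, 0, 0, 1, 2, 2],
    [0, 1, 1, 1, 2, 2],
]

n_objects = len(sorts[0])
similarity = np.zeros((n_objects, n_objects))
for labels in sorts:
    labels = np.asarray(labels)
    # 1 where two objects share a group label, 0 elsewhere.
    similarity += (labels[:, None] == labels[None, :]).astype(float)
similarity /= len(sorts)  # proportion of participants pairing i with j

print(similarity.round(2))
```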
In a semi-supervised task participants had to form exactly three groups. In a supervised task participants received three prototype objects (see figure) and had to sort all other objects into three categories defined by the prototypes. The categorization was repeated until the same groups were formed twice in a row. The number of repetitions needed across modalities was the same, showing that the task was equally hard visually and haptically. For more detailed analyses we generated similarity matrices based on which stimulus was paired with which other stimulus. As a measure of consistency – within and across modalities as well as within and across tasks – we calculated cross correlations between these matrices (see figure). Correlations within modalities were always higher than across modalities. In addition, as expected, the more constrained the task, the more consistently participants grouped the stimuli. Critically, multi-dimensional scaling analysis of the similarity matrices showed that all three shape parameters were perceived visually and haptically in all categorization tasks, but that the weighting of the parameters was dependent on the modality. In line with our previous results, this demonstrates the remarkable robustness of visual and haptic processing of complex shapes.}, web_url = {http://imrf.mcmaster.ca/IMRF/ocs/index.php/meetings/2009/paper/view/679}, event_name = {10th International Multisensory Research Forum (IMRF 2009)}, event_place = {New York, NY, USA}, state = {published}, author = {Gaissert N{ninagaissert}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ 5917, title = {Visual experience supports haptic face recognition: Evidence from the early- and late-blind}, year = {2009}, month = {7}, volume = {10}, number = {547}, pages = {81}, abstract = {In previous experiments, we provided further evidence that the haptic and visual systems both have the capacity to process faces, and that face information can be shared across sensory modalities [1]. Interestingly, we found this information transfer across modalities to be asymmetric and limited by haptic face processing. Visual face perception relies on specific processes that evolve with perceptual expertise, while we have little to no training in haptic face recognition throughout life. We, therefore, suggest that the observed asymmetry in visual and haptic face processing might be attributed to different levels of expertise. To test the importance of visual experience with faces also for haptic recognition we studied haptic face recognition in the early-blind (N=10), late-blind (N=9) and sighted (N=18). Participants performed an old/new recognition task for which sets of three faces were learned haptically, followed by three subsequent haptic test-blocks. We found that early-blind participants could recognize faces haptically, although recognition accuracy was low (d’=0.83). More interestingly, however, recognition accuracy was significantly better in late-blind (d’=1.56) as well as sighted (d’=1.42) participants. Our results, therefore, suggest that behavioral benefits in haptic face recognition require visual experience with faces. A lack thereof cannot be compensated for by purely perceptual haptic expertise as the results for the early-blind show.
These findings suggest that haptic face recognition can recruit specific visual processing mechanisms that are shaped by visual experience [2].}, web_url = {http://imrf.mcmaster.ca/IMRF/ocs/index.php/meetings/2009/paper/view/547}, event_name = {10th International Multisensory Research Forum (IMRF 2009)}, event_place = {New York, NY, USA}, state = {published}, author = {Dopjans L{ldopjans}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ 5679, title = {BOLD signal in intraparietal sulcus covaries with magnitude of implicitly driven attention shifts}, journal = {NeuroImage}, year = {2009}, month = {5}, volume = {45}, number = {4}, pages = {1314-1328}, abstract = {A lot is known about the neural basis of directing attention based on explicit cues. In real life however, attention shifts are rarely directed by explicit cues but rather generated implicitly, for example on the basis of previous experience with a given situation. Here, we aimed at studying attention shifts dependent on recent trial history. While explicitly cued attention shifts involve activity in cortex of the intraparietal sulcus, whether this region is also involved in shifting attention according to recent history is still unknown. We asked observers to detect targets in a stream of visual stimuli with three feature dimensions: Color, shape and motion. Critically, target occurrence probability was always higher in one stimulus dimension than in the others, and probabilities switched between dimensions over blocks of trials. After each probability switch, target detection times decreased exponentially for high-probability targets and increased for low-probability targets, compatible with gradual shifts in attention dependent on trial history since the switch. BOLD signal in left prefrontal and intraparietal sulcus regions was higher in the early phase after the switch, while anterior cingulate, cuneus, precuneus, temporal and more anterior frontal regions showed more activation later after the switch. These findings are compatible with the engagement of regions involved in the establishment and maintenance of attentional sets. BOLD signal in left intraparietal sulcus correlated with the size of the performance changes consecutive to the detected targets, suggesting that it reflects the size of attention shifts induced by updating target probabilities over recent trial history.}, file_url = {/fileadmin/user_upload/files/publications/SchultzLennert_Stream_accepted_withFigs_formatted_5679[0].pdf}, state = {published}, DOI = {10.1016/j.neuroimage.2009.01.012}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; Lennert T{lennert}{Department Human Perception, Cognition and Action}} } @Article{ 5678, title = {Natural facial motion enhances cortical responses to faces}, journal = {Experimental Brain Research}, year = {2009}, month = {4}, volume = {194}, number = {3}, pages = {465-475}, abstract = {The ability to perceive facial motion is important to successfully interact in social environments. Previously, imaging studies have investigated neural correlates of facial motion primarily using abstract motion stimuli. Here, we studied how the brain processes natural non-rigid facial motion in direct comparison to static stimuli and matched phase-scrambled controls. 
As predicted from previous studies, dynamic faces elicit higher responses than static faces in lateral temporal areas corresponding to hMT+/V5 and STS. Interestingly, individually-defined, static-face-sensitive regions in bilateral fusiform gyrus and left inferior occipital gyrus also responded more to dynamic than static faces. These results suggest integration of form and motion information during the processing of dynamic faces even in ventral temporal and inferior lateral occipital areas. In addition, our results show that dynamic stimuli are a robust tool to localize areas related to the processing of static and dynamic face information.}, file_url = {/fileadmin/user_upload/files/publications/SchultzPilz_facelocaliser_acceptedWithFigs_5678[0].pdf}, web_url = {http://www.springerlink.com/content/2l46216834155751/}, state = {published}, DOI = {10.1007/s00221-009-1721-9}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; Pilz KS{kpilz}{Department Human Perception, Cognition and Action}} } @Conference{ ArmannB2009, title = {Categorical perception of male and female faces depends on familiarity}, year = {2009}, month = {4}, pages = {3}, abstract = {The perception of face identity, race and also facial expressions has been shown to be categorical. For another characteristic of faces, sex, results have been conflicting so far. To resolve this controversy, we created male and female faces with similar perceived degrees of 'maleness' and 'femaleness', based on extensive ratings of faces and sex morphs from our face database. We then created sex continua using these controlled stimuli and tested categorical perception (CP) with classical discrimination and classification tasks. Participants were naïve (1), or had been familiarized with average faces of both sexes (2), or with the 'controlled' male and female faces (3). Our results confirm the lack of naturally occurring CP for sex in (1). Moreover, since only participants in (3) showed clear CP, our results suggest (as stated in the 'single-route hypothesis') that the processing of sex and identity information in faces is not independent. We found no evidence that familiarization with sex information (as given by average male and female faces) transfers to individual faces.}, web_url = {https://www.psychology.org.au/Assets/Files/2009-Combined-Abstracts.pdf}, event_name = {2009 Australian Psychology Conferences: 36th Australasian Experimental Psychology Conference}, event_place = {Wollongong, Australia}, state = {published}, author = {Armann R{armann}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Conference{ 5700, title = {Visual Perception of dynamic facial expressions}, year = {2009}, month = {1}, abstract = {In contact with customers, communication plays the most important role, and an astonishing 90% of that communication is nonverbal. Facial expressions are one of the ways in which both parties - your customer and you - communicate with each other. Little is known so far about this form of communication: How do we interpret faces? What do we need to pay attention to?
This talk presents research findings on the visual processing of facial expressions that are essential for nonverbal communication.}, web_url = {http://www.opti-munich.com/?lang=de}, event_name = {International Trade Fair for Trends in Optics (OPTI 2009)}, event_place = {München, Germany}, state = {published}, author = {Kaulard K{kascot}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 5470, title = {LibGaze: Real-time gaze-tracking of freely moving observers for wall-sized displays}, year = {2008}, month = {10}, pages = {101-110}, abstract = {We present a mobile system for tracking the gaze of an observer in real-time as they move around freely and interact with a wall-sized display. The system combines a head-mounted eye tracker with a motion capture system for tracking markers attached to the eye tracker. Our open-source software library libGaze provides routines for calibrating the system and computing the viewer’s position and gaze direction in real-time. The modular architecture of our system supports simple replacement of each of the main components with alternative technology. We use the system to perform a psychophysical user-study, designed to measure how users visually explore large displays. We find that observers use head movements during gaze shifts, even when these are well within the range that can be comfortably reached by eye movements alone. This suggests that free movement is important in normal gaze behaviour, motivating further applications in which the tracked user is free to move.}, file_url = {fileadmin/user_upload/files/publications/VMV-2008-Herholz.pdf}, web_url = {https://www.researchgate.net/publication/264879670_LibGaze_Real-time_gaze-tracking_of_freely_moving_observers_for_wall-sized_displays_Vision_Modeling_and_Visualization_Proceedings}, editor = {Deussen, O., D. Keim}, publisher = {IOS Press}, address = {Amsterdam, Netherlands}, event_name = {13th International Fall Workshop on Vision, Modeling, and Visualization (VMV 2008)}, event_place = {Konstanz, Germany}, state = {published}, author = {Herholz S{sherholz}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Tanner TG{tanner}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Fleming RW{roland}{Department Human Perception, Cognition and Action}} } @Poster{ GaissertWB2008, title = {Analyzing haptic and visual object categorization of parametrically-defined shapes}, year = {2008}, month = {10}, volume = {9}, number = {6}, abstract = {To investigate multi-sensory, perceptual representations of three-dimensional object spaces, we generated complex, shell-shaped objects by altering three parameters defining shell shape. For haptic experiments, 3D-printed plastic models were freely explored by blindfolded participants with both hands. For visual experiments, we used 2D images of these objects. Previously, we reported results of a similarity rating task in which we split the three-dimensional object space into three orthogonal planes. Multidimensional scaling (MDS) of the pair-wise similarity ratings showed that participants reproduced the three planes almost exactly both visually and haptically.
Here, we report results of a categorization task in which all objects were presented simultaneously either visually or haptically to ten participants who then categorized the objects in as many groups as they liked to. MDS analyses revealed a three-dimensional perceptual space underlying both visual and haptic data. Interestingly, the three dimensions corresponded to the parameters of shell shape with a different weighting of the dimensions in the visual and the haptic condition. Our results show that humans are able to reproduce the underlying parameters of a complex, three-dimensional object space in a similarity and categorization task using either visual or haptic modalities surprisingly well.}, event_name = {9th Conference of the Junior Neuroscientists of Tübingen (NeNa 2008)}, event_place = {Ellwangen, Germany}, state = {published}, author = {Gaissert N{ninagaissert}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ ArmannB2008, title = {Categorical Perception of Male and Female Faces and the Single-Route Hypothesis}, year = {2008}, month = {10}, pages = {13}, abstract = {The cognitive process of categorizing perceptually similar stimuli into qualitatively different categories is fundamental to any systematic acting upon the world, as it helps to reduce the immense number of entities to more manageable fragments and spares us from learning anew each time we encounter an unknown individual. Categories are evident in all sensory modalities and range from relatively simple (e.g., color perception) to the most abstract human concepts, as for example faces. Categorical perception (CP) has been shown for face identity (e.g., Beale & Keil, 1995), ethnicity (Levin & Beale, 2000), and facial expression (Calder et al., 1996). Astonishingly, for sex, a natural facial characteristic consisting of only two biologically relevant categories, conflicting results have been reported so far. CP for sex has been shown (Campanella et al., 2001) when sex information was varied linearly (by morphing) between male and female face identities, thus intermixing identity and sex information. When sex continua were created based on single face identities (Bülthoff and Newell, 2004), no CP for sex was found in naive participants. So the question remained open whether or not there is CP for the perception of sex as a facial dimension or if processing of the sex of a face is directly linked to processing of the face's identity, as proposed by the 'single-route hypothesis' (e.g., by Rossion, 2002; Ganel & Goshen-Gottstein, 2002; Bülthoff & Newell, 2004). To overcome one potential constraint of earlier studies, i.e., 'asymmetric' sex morph continua, we performed extensive ratings of faces and sex morphs from our face database, to create 'controlled' male and female faces with similar perceived degrees of 'maleness' and 'femaleness'. We then examined CP of sex for these faces with classical discrimination and classification experiments. Critically, we manipulated the degree of familiarization of the faces prior to testing, as follows. Observers were either naive, or familiarized with the average male and female face of all faces, or the endpoint identities of the morph continua, or with other male and female faces with the same perceived degree of maleness and femaleness as the test faces.
Our results confirm the lack of naturally occurring CP for sex and provide more evidence for the linked processing of sex and identity, as participants showed clear CP only after familiarization with the test face identities.}, event_name = {9th Conference of the Junior Neuroscientists of Tübingen (NeNa 2008)}, event_place = {Ellwangen, Germany}, state = {published}, author = {Armann R{armann}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ 5215, title = {A visual but no haptic face inversion effect indicates modality-specific processing differences}, journal = {Perception}, year = {2008}, month = {8}, volume = {37}, number = {ECVP Abstract Supplement}, pages = {5}, abstract = {In previous experiments, we provided further evidence that 3-D face stimuli can be learnt and recognized across haptic and visual modalities. Our results suggested information transfer across modalities to be asymmetric due to differences in visual versus haptic face processing (ie, configural vs featural). To test this hypothesis, we designed two experiments investigating a visual, haptic and cross-modal face-inversion effect: Experiment 1 used an old/new recognition task in which three upright faces were learnt visually followed by three visual test-blocks (one with upright and two with inverted faces) and one haptic test-block with inverted faces. We found a strong inversion effect for visually learnt faces (visual-upright: d'=2.07, visual-inverted: d'=0.6, haptic-inverted: d'=0.52). When we exchanged learning and testing modalities in Experiment 2 (haptic learning of upright faces followed by one haptic-upright, two haptic-inverted and one visual-inverted test-blocks), we failed to find an inversion effect for haptically learnt faces (haptic-upright: d'=1.45, haptic-inverted: d'=1.75, visual-inverted: d'=1.16). Whereas visual face processing thus operates configurally, haptic processing seems to rely on featural information.}, web_url = {http://pec.sagepub.com/content/37/1_suppl.toc}, event_name = {31st European Conference on Visual Perception}, event_place = {Utrecht, Netherlands}, state = {published}, DOI = {10.1177/03010066080370S101}, author = {Dopjans L{ldopjans}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 5693, title = {Categorical perception of male and female faces and the single-route hypothesis}, journal = {Perception}, year = {2008}, month = {8}, volume = {37}, number = {ECVP Abstract Supplement}, pages = {117}, abstract = {Categorical perception (CP) has been demonstrated for face identity and facial expression, while conflicting results have been reported for sex. Furthermore, the question whether processing of sex and identity information is linked remains open. Based on extensive ratings of faces and sex morphs from our face database, we created 'controlled' male and female faces with similar perceived degrees of 'maleness' and 'femaleness'. We then examined CP of sex for these faces with classical discrimination and classification tasks using sex continua. Participants were naive (1), or had been familiarized with average faces of both sexes (2), or with the 'controlled' male and female faces (3). Our results confirm the lack of naturally occurring CP for sex in (1). 
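Classification performance along morph continua like these is conventionally summarized by fitting a cumulative Gaussian, whose mean estimates the category boundary (PSE) and whose spread yields the JND. A minimal sketch with hypothetical response proportions, not the authors' data:

```python
# Sketch: fit a cumulative Gaussian to classification proportions along
# a morph continuum (hypothetical data; illustrative only).
import numpy as np
from scipy.optimize import curve_fit
from scipy.stats import norm

def psychometric(morph_level, pse, sigma):
    """Probability of choosing the second category."""
    return norm.cdf(morph_level, loc=pse, scale=sigma)

morph_levels = np.linspace(0.0, 1.0, 7)  # 0 and 1 = continuum endpoints
p_second = np.array([0.02, 0.05, 0.20, 0.55, 0.80, 0.95, 0.99])

(pse, sigma), _ = curve_fit(psychometric, morph_levels, p_second, p0=(0.5, 0.1))
jnd = sigma * norm.ppf(0.75)  # half the 25%-to-75% distance
print(f"PSE = {pse:.2f}, JND = {jnd:.2f} (morph units)")
```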
Furthermore, they provide more evidence for the linked processing of sex and identity, as only participants in (3) showed clear CP. We found no evidence that familiarization with sex information (as given by average male and female faces) transfers to individual faces.}, web_url = {http://pec.sagepub.com/content/37/1_suppl.toc}, event_name = {31st European Conference on Visual Perception}, event_place = {Utrecht, Netherlands}, state = {published}, DOI = {10.1177/03010066080370S101}, author = {Armann R{armann}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ 5680, title = {Perception of animacy from a single moving object}, journal = {Perception}, year = {2008}, month = {8}, volume = {37}, number = {ECVP Abstract Supplement}, pages = {154}, abstract = {Humans attribute animacy even to very simple objects displaying self-propelled or goal-directed motion. To test attribution of animacy parametrically using classical psychophysical techniques, we created animations consisting of a single dot that appeared either self-propelled (modelled on the movements of a fly) or moved by an external force (modelled on a leaf drifting in the wind). Both animations were built using the same movement equation and differed in speed and acceleration profiles, allowing parametric morphing from one ‘extreme’ animation to the other. Low-level stimulus properties (range of screen positions covered, speed or acceleration) did not vary systematically during morphing. 26 naive subjects were asked to rate the ‘extreme’ animations and 4 intermediate morphs for animacy. Ratings from 19 subjects as well as averages over all subjects could be modelled by a cumulative Gaussian; the median PSE was in the middle of the morph range and the median JND was 1.7. These stimuli thus allow parametric testing of animacy perception from single objects with movements modelled on real animate entities.}, file_url = {/fileadmin/user_upload/files/publications/jschultzECVP2008_v2_[0].pdf}, web_url = {http://pec.sagepub.com/content/37/1_suppl.toc}, event_name = {31st European Conference on Visual Perception}, event_place = {Utrecht, Netherlands}, state = {published}, DOI = {10.1177/03010066080370S101}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; Dopjans L{ldopjans}{Department Human Perception, Cognition and Action}} } @Conference{ 5471, title = {Visual and haptic perceptual representations of complex 3-D objects}, journal = {Perception}, year = {2008}, month = {8}, volume = {37}, number = {ECVP Abstract Supplement}, pages = {125}, abstract = {In this study we combined two new techniques to investigate visual and haptic perceptual representations of three-dimensional, parametrically-defined shapes. We generated a 3-D object space of shell-shaped objects by altering three model parameters defining shell shape. We created 21 equidistant plastic models of the objects with a 3-D printing device. Haptic exploration was done by having blindfolded participants explore these objects with both hands and no restrictions to the exploratory procedure. To ensure visual interaction without any haptic information, visual representations of these objects were presented to participants via a head-mounted display. Participants manipulated a position-tracked physical substitute to rotate the objects on the display. Pairwise similarity ratings were performed and analysed using multidimensional scaling techniques.
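To illustrate the multidimensional scaling step used throughout these studies, pairwise similarity ratings can be converted to dissimilarities and embedded in a low-dimensional space. A minimal sketch using scikit-learn and random placeholder ratings in place of real data:

```python
# Sketch: recover a perceptual configuration from pairwise similarity
# ratings via MDS (placeholder ratings; illustrative only).
import numpy as np
from sklearn.manifold import MDS

rng = np.random.default_rng(1)
n = 21                                   # e.g., 21 shell-shaped objects
ratings = rng.uniform(1.0, 7.0, size=(n, n))
ratings = (ratings + ratings.T) / 2.0    # symmetrize the rating matrix
np.fill_diagonal(ratings, 7.0)           # identical pairs: maximal similarity

dissimilarity = ratings.max() - ratings  # similarity -> dissimilarity

mds = MDS(n_components=3, dissimilarity="precomputed", random_state=0)
coords = mds.fit_transform(dissimilarity)
print(coords.shape, f"stress = {mds.stress_:.1f}")
```

How well the recovered coordinates match the generating shape parameters is what the congruency claims in these abstracts refer to.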
Both visual and haptic perceptual representations were highly consistent with the underlying physical three-dimensional parameter space. Interestingly, haptic exploration resulted in a more precise perceptual representation than the visual condition. Additionally, very similar MDS maps of the visual and the haptic exploration provide evidence that one shared perceptual space is underlying both modalities.}, web_url = {http://pec.sagepub.com/content/37/1_suppl.toc}, event_name = {31st European Conference on Visual Perception}, event_place = {Utrecht, Netherlands}, state = {published}, DOI = {10.1177/03010066080370S101}, author = {Gaissert N{ninagaissert}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ 5472, title = {Analyzing haptic and visual object categorization of parametrically-defined shapes}, year = {2008}, month = {7}, volume = {9}, number = {233}, pages = {192}, abstract = {To investigate multi-sensory, perceptual representations of three-dimensional object spaces, we generated complex, shell-shaped objects by altering three parameters defining shell shape. For haptic experiments, 3D-printed plastic models were freely explored by blindfolded participants with both hands. For visual experiments, we used 2D images of these objects. Previously, we reported results of a similarity rating task in which we split the three-dimensional object space into three orthogonal planes. Multidimensional scaling (MDS) of the pair-wise similarity ratings showed that participants reproduced the three planes almost exactly both visually and haptically. Here, we report results of a categorization task in which all objects were presented simultaneously either visually or haptically to ten participants who then categorized the objects in as many groups as they liked to. MDS analyses revealed a three-dimensional perceptual space underlying both visual and haptic data. Interestingly, the three dimensions corresponded to the parameters of shell shape with a different weighting of the dimensions in the visual and the haptic condition. Our results show that humans are able to reproduce the underlying parameters of a complex, three-dimensional object space in a similarity and categorization task using either visual or haptic modalities surprisingly well.}, web_url = {http://imrf.mcmaster.ca/IMRF/ocs/index.php/meetings/2008/}, event_name = {9th International Multisensory Research Forum (IMRF 2008)}, event_place = {Hamburg, Germany}, state = {published}, author = {Gaissert N{ninagaissert}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 5214, title = {Encoding differences in visual and haptic face recognition}, year = {2008}, month = {7}, volume = {9}, number = {214}, pages = {190}, abstract = {In previous experiments, we provided further evidence that 3-D face stimuli can be learned and recognized by touch alone. Performance was significantly improved when haptic memory was refreshed during the experiment, indicating high memory demands due to the serial encoding process of haptic exploration. We also found that performance in a complementary visual experiment was better than in the haptic one. We suggested that these results arise from differences in encoding procedures (holistic in vision vs. 
serial in haptics). To test this hypothesis we designed the following two experiments which promoted serial encoding also in vision: Experiment 1 used the same old/new recognition task for which three faces were learned with three subsequent test-blocks. Participants used a mouse to move a Gaussian window which uncovered 2° of a photograph of the 3-D face. Recognition accuracy was low (d'=0.98), equivalent to non-refreshed haptic performance, and significantly lower than for unrestricted visual recognition (d’=2.12). Using the same design in Experiment 2, memory was refreshed by repeated exposure to the learned faces. Performance increased significantly (d'=1.64) to levels of memory-refreshed haptic performance and unrestricted visual recognition. The performance differences in visual and haptic face recognition therefore might be attributed to modality-specific encoding strategies and memory demands.}, web_url = {http://imrf.mcmaster.ca/IMRF/2008/pdf/FullProgramIMRF08.pdf}, event_name = {9th International Multisensory Research Forum (IMRF 2008)}, event_place = {Hamburg, Germany}, state = {published}, author = {Dopjans L{ldopjans}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ 4686, title = {A dynamic object-processing network: Metric shape discrimination of dynamic objects by activation of occipito-temporal, parietal and frontal cortex}, journal = {Cerebral Cortex}, year = {2008}, month = {6}, volume = {18}, number = {6}, pages = {1302-1313}, abstract = {Shape perception is important for object recognition. However, behavioural studies have shown that rigid motion also contributes directly to the recognition process, in addition to providing visual cues to shape. Using psychophysics and functional brain imaging, we investigated the neural mechanisms involved in shape and motion processing for dynamic object recognition. Observers discriminated between pairs of rotating novel objects in which the three-dimensional shape difference between the pair was systematically varied in metric steps. In addition, the objects rotated in either the same or different direction to determine the effect of task-irrelevant motion on behaviour and neural activity. We found that observers’ shape discrimination performance increased systematically with shape differences, as did the haemodynamic responses of occipito-temporal, parietal and frontal regions. Furthermore, responses in occipital regions were only correlated with observers’ perceived shape differences. We also found different effects of object motion on shape discrimination across observers, which were reflected in responses of the superior temporal sulcus.
These results suggest a network of regions that are involved in the discrimination of metric shape differences for dynamic object recognition.}, web_url = {http://cercor.oxfordjournals.org/content/18/6/1302.full.pdf+html}, state = {published}, DOI = {10.1093/cercor/bhm162}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; Chuang L{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 5162, title = {Analyzing perceptual representations of complex, parametrically-defined shapes using MDS}, year = {2008}, month = {6}, pages = {265-274}, abstract = {In this study we show that humans are able to form a perceptual space from a complex, three-dimensional shape space that is highly congruent to the physical object space no matter if the participants explore the objects visually or haptically. The physical object space consists of complex, shell-shaped objects which were generated by varying three shape parameters. In several psychophysical experiments participants explored the objects either visually or haptically and performed similarity ratings. Multidimensional scaling (MDS) analyses showed high congruency of the visual and haptic perceptual space to the physical object space. Additionally, visual and haptic exploration resulted in very similar MDS maps providing evidence for one shared perceptual space underlying both modalities.}, file_url = {/fileadmin/user_upload/files/publications/Eurohaptics2008-Gaissert_5162[0].pdf}, web_url = {http://www.disam.upm.es/~eurohaptics2008/}, editor = {Ferre, M.}, publisher = {Springer}, address = {Berlin, Germany}, series = {Lecture Notes in Computer Science ; 5024}, booktitle = {Haptics: Perception, Devices and Scenarios}, event_name = {6th International Conference EuroHaptics 2008}, event_place = {Madrid, Spain}, state = {published}, ISBN = {978-3-540-69056-6}, DOI = {10.1007/978-3-540-69057-3_31}, author = {Gaissert N{ninagaissert}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ VuongS2008, title = {Dynamic objects are more than the sum of their views: Behavioural and neural signatures of depth rotation in object recognition}, journal = {Journal of Vision}, year = {2008}, month = {6}, volume = {8}, number = {6}, pages = {39}, abstract = {Motion plays an important role in object recognition at both the behavioural and neural levels. For example, studies have shown that observers extrapolate to unfamiliar views of objects rotating in depth when the motion is smooth and predictable. Using a combined psychophysics and fMRI study, we tested whether the smoothness of rotation affected performance and neural responses. Sixteen observers performed a same-different discrimination task in a 3T scanner at the Max Planck Institute. They were presented with a probe-test stimulus sequence, and judged whether both depicted the same or different objects. In blocks of trials, the probe stimulus was either a static image of an object, a smooth animation of a rotating object, or a scrambled animation of an object in which the frames of a smooth animation were randomized. Importantly, both motion blocks presented the same set of views. 
Within a block, the test stimulus was an image which depicted the object from unfamiliar views that preceded (pre condition) or continued the observed rotation trajectory (post condition). The blocks were optimized to counterbalance for history effects. Observers responded more quickly in the post than pre condition with smooth animations but responded equally fast for these conditions in scrambled and static blocks. Whole-brain group analyses showed that parietal regions were more active in smooth than scrambled blocks, frontal regions were more active for smooth than static blocks, and medial temporal regions were more active in both motion blocks relative to static blocks. These regions are known to process dynamic stimuli. Preliminary analyses of the time courses within these regions show different patterns of activation between pre and post conditions across the different blocks. Overall, the results highlight the importance of smooth motion, and suggest that a rotating object is more than the sum of its views.}, web_url = {http://www.journalofvision.org/content/8/6/39}, event_name = {8th Annual Meeting of the Vision Sciences Society (VSS 2008)}, event_place = {Naples, FL, USA}, state = {published}, author = {Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}} } @Article{ WallravenKKP2008, title = {In the eye of the beholder: The perception of indeterminate art}, journal = {Leonardo}, year = {2008}, month = {4}, volume = {41}, number = {2}, pages = {116-117}, state = {published}, author = {Wallraven C{walli}{Department Human Perception, Cognition and Action}; Kaulard K{kascot}{Department Human Perception, Cognition and Action}; K\"urner C{cora}{Department Human Perception, Cognition and Action}; Pepperell R} } @Thesis{ 5699, title = {Visual Perception of dynamic facial expressions: Implementation and validation of a database for conversational facial expressions}, year = {2008}, month = {2}, state = {published}, type = {Diplom}, author = {Kaulard K{kascot}{Department Human Perception, Cognition and Action}} } @Article{ 4870, title = {Examining art: dissociating pattern and perceptual influences on oculomotor behaviour}, journal = {Spatial Vision}, year = {2007}, month = {12}, volume = {21}, number = {1}, pages = {165-184}, abstract = {When observing art the viewer’s understanding results from the interplay between the marks made on the surface by the artist and the viewer’s perception and knowledge of it. Here we use a novel set of stimuli to dissociate the influences of the marks on the surface and the viewer’s perceptual experience upon the manner in which the viewer inspects art. Our stimuli provide the opportunity to study situations in which (1) the same visual stimulus can give rise to two different perceptual experiences in the viewer, and (2) the visual stimuli differ but give rise to the same perceptual experience in the viewer. We find that oculomotor behaviour changes when the perceptual experience changes. Oculomotor behaviour also differs when the viewer’s perceptual experience is the same but the visual stimulus is different. 
The methodology used and insights gained from this study offer a first step toward an experimental exploration of the relative influences of the artist’s creation and viewer’s perception when viewing art and also toward a better understanding of the principles of composition in portraiture.}, web_url = {http://springerlink.metapress.com/content/58h411820678484p/fulltext.pdf}, state = {published}, DOI = {10.1163/156856807782753903}, author = {Tatler BW; Wade NJ; Kaulard K{kascot}} } @Poster{ GaisertWB2017, title = {Analyzing perceptual representations of complex, parametrically-defined shapes using MDS}, year = {2007}, month = {11}, day = {27}, pages = {24}, abstract = {Prior studies have shown that humans can create a perceptual space of 3D objects that is highly congruent to the physical stimulus space when the underlying stimulus space varies in two different dimensions like global shape and local texture (Cooke et al., 2006). But what happens if the stimulus space varies in more than two dimensions? And what happens if those dimensions are not as intuitive as "shape" and "texture"? As a first step to answer these questions, a stimulus space of complex, shell-shaped objects was generated using the mathematical model of Fowler, Meinhardt and Prusinkiewicz (1992) that describes growth parameters of shells. The objects varied in three dimensions each of which controlled a different aspect of its shape. In psychophysical experiments participants viewed pairs of objects and rated the similarity between them. Multidimensional scaling (MDS) was used to calculate the perceptual space. Contrary to previous experiments, this space showed only little congruency to the physical stimulus space. Additional free categorization and sorting tasks revealed that humans found it difficult to reconstruct the dimensions of the physical stimulus space. In future experiments, we plan to compare these visual similarity ratings to haptic similarity ratings to study cross modal interaction when a complex three-dimensional stimulus space is explored.}, web_url = {http://ikw.uni-osnabrueck.de/~NBP/Abstracts/Abstracts_NeNa07_Tuebingen.pdf}, event_name = {8th Conference of Tuebingen Junior Neuroscientists (NeNa 2007)}, event_place = {Freudenstadt, Germany}, state = {published}, author = {Gaissert N{ninagaissert}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ DobjansWB2007, title = {Cross modal transfer in face recognition}, year = {2007}, month = {11}, day = {27}, pages = {15}, abstract = {Prior studies have shown that humans can recognize faces by touch alone (Kilgour and Lederman, 2002). Here we want to shed further light on haptic face recognition with five experiments using a well-defined stimulus face space based on the morphable MPI-Face-Database. Experiment 1 used a same/different task with sequentially presented faces which established that subjects were able to discriminate faces haptically, using short term memory. In Experiment 2 we used an old/new recognition task to assess whether participants were able to learn and recognize faces haptically. Moreover, we addressed the question whether participants were able to generalize information from haptically learned faces to the visual domain - a question directed at probing the representation underlying multi-sensory face recognition. 
In Experiment 3, we changed the design such that haptic memory was refreshed before each test-block by repeated exposure to the three learned faces. In Experiment 4, we interchanged learning and recognition modality with respect to Experiments 2 and 3, testing within-modality recognition in the visual domain and cross-modal transfer by haptic recognition of the face masks. We found that participants were indeed able to learn and recognize small faces haptically, with haptic memory being a crucial factor for recognition performance. Moreover, we found participants to be able to generalize information from haptically learned faces to the visual domain and vice versa, however, with a clear advantage for vision as the learning modality. In Experiment 5, we used a haptic version of the inversion paradigm to study how orientation-sensitive haptic face recognition is and to shed further light on the nature of information underlying haptic face processing. As we failed to find a haptic face inversion effect, we suggest that participants rely more on featural than configural information processing in haptic face recognition. Finally, we will briefly discuss current experiments that look at size-dependent effects of haptic face recognition.}, web_url = {http://ikw.uni-osnabrueck.de/~NBP/Abstracts/Abstracts_NeNa07_Tuebingen.pdf}, event_name = {8th Conference of Tuebingen Junior Neuroscientists (NeNa 2007)}, event_place = {Freudenstadt, Germany}, state = {published}, author = {Dopjans L{ldopjans}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 4687, title = {Intracranial electrophysiological correlates in humans during observation of animate-looking moving objects}, year = {2007}, month = {11}, volume = {37}, number = {304.4}, abstract = {An essential need of brain function is the detection of living entities, and one of the major characteristics for their identification is their motion. Humans are very good at recognizing living entities from their motion, and attribute animacy to even very simple objects displaying self-propelled or goal-directed motion. Our previous results (1) show that increasing correlation between the movements of two simple interacting objects leads to A) an increase in the impression of goal-directed motion and of animacy and B) increasing BOLD signal in the superior temporal sulcus (STS), suggesting that the STS is involved in decoding the information leading to the percept of animacy. This is consistent with previous studies implicating the posterior part of the STS in recognition of biological motion. In the current study, 7 volunteer patients undergoing investigations prior to epilepsy surgery observed the animate motion stimuli used in (1). Simultaneously, we recorded multichannel subdural electrocorticogram data from healthy cortex surrounding the STS and performed trial-by-trial frequency decomposition over time. We found that power in the 30-60 Hz frequency band (gamma band) between 1 and 2 seconds after stimulus onset showed a significant parametric response to the amount of goal-directed motion, paralleling our previous BOLD signal findings (1). Furthermore, due to the high temporal resolution of these data, we were able to localize at which time points during the animations the strongest response in the STS occurred.
Our results confirm the importance of the STS in the processing of visual characteristics of animate entities, and suggest that neuronal activity in this area changes over the duration of the animations. We are currently comparing time-varying attributes of the stimuli to the time-course of gamma-band activity to reveal which events in the stimuli drive STS activity.}, web_url = {http://www.sfn.org/am2007/}, event_name = {37th Annual Meeting of the Society for Neuroscience (Neuroscience 2007)}, event_place = {San Diego, CA, USA}, state = {published}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; Cohen MX; Haupt S; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Elger C} } @Poster{ 4715, title = {Crossmodal transfer in face recognition: from haptics to vision}, journal = {Perception}, year = {2007}, month = {8}, volume = {36}, number = {ECVP Abstract Supplement}, pages = {207}, abstract = {Prior studies have shown that humans can recognize faces by touch alone. This study investigated haptic face recognition with two experiments using a well-defined stimulus face-space based on the morphable MPI-Face-Database. In Experiment 1, we used an old/new recognition task for which different sets of three faces (out of six) were learned haptically with three subsequent haptic test-blocks and one visual test-block. We found that participants could recognize faces haptically although recognition accuracy was low (65%) and tended to decrease across blocks. Cross-modal recognition, however, was at chance level (48%). In Experiment 2, haptic memory was refreshed before each test-block by repeated exposure to the three learned faces. We found that performance increased significantly to 76% and that it became more consistent across blocks. Most importantly, however, we found clear evidence for cross-modal recognition as visual performance rose above chance level (62%). Our results demonstrate that during visual face recognition, participants have access to information learned during haptic exploration, perhaps allowing them to form a visual image from haptic information.}, web_url = {http://pec.sagepub.com/content/36/1_suppl.toc}, event_name = {30th European Conference on Visual Perception}, event_place = {Arezzo, Italy}, state = {published}, DOI = {10.1177/03010066070360S101}, author = {Dopjans L{ldopjans}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 5019, title = {Human observers use personal exploration patterns in novel object recognition}, journal = {Perception}, year = {2007}, month = {8}, volume = {36}, number = {ECVP Abstract Supplement}, pages = {49}, abstract = {Humans learn and recognize objects through active exploration. Sixteen participants freely explored 3-D amoeboid objects in a virtual-reality environment during learning. They handled a device whose spatial coordinates determined the object’s position relative to the observer’s viewpoint. These exploration patterns were also recorded for testing. In a subsequent old/new recognition test, participants either actively explored or passively viewed old (learned) and new objects in the same setup. Generally, active participants performed better than passive participants (in terms of sensitivity: d’ = 1.08 vs. 0.84, respectively).
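The sensitivity values quoted in this and the following abstracts are d’ scores from signal detection theory: the difference between the z-transformed hit and false-alarm rates of the old/new test. A minimal Python sketch of that computation, with invented response counts:

from statistics import NormalDist

def d_prime(hits, misses, false_alarms, correct_rejections):
    # Log-linear correction keeps rates away from 0 and 1, so the
    # probit transform below stays finite.
    hit_rate = (hits + 0.5) / (hits + misses + 1)
    fa_rate = (false_alarms + 0.5) / (false_alarms + correct_rejections + 1)
    z = NormalDist().inv_cdf                    # probit (inverse normal CDF)
    return z(hit_rate) - z(fa_rate)

# Invented counts for illustration; a higher d' means better discrimination
# of old (learned) objects from new ones.
print(d_prime(hits=38, misses=10, false_alarms=14, correct_rejections=34))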
Despite this, those participants who passively viewed learned objects animated with their personal motion trajectories maintained comparable performance to that of participants who actively explored the objects (d’ = 1.13). In contrast, passive observers’ performance decreased when these trajectories were temporally reversed (d’ = 0.69) or when another observer’s motion trajectories were used (d’ = 0.70). While active exploration generally allowed better recognition of objects compared to passive viewing, our observers could rely on idiosyncratic exploration patterns, in which particular aspects of object structure were revealed over time, to achieve equivalent performance.}, web_url = {http://pec.sagepub.com/content/36/1_suppl.toc}, event_name = {30th European Conference on Visual Perception}, event_place = {Arezzo, Italy}, state = {published}, DOI = {10.1177/03010066070360S101}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 5021, title = {The effect of context in face and object recognition}, journal = {Perception}, year = {2007}, month = {8}, volume = {36}, number = {ECVP Abstract Supplement}, pages = {146}, abstract = {Whether recognition and categorization are parallel or serial processes remains controversial. To address this, we investigated whether face recognition is influenced by task-irrelevant face categories. We examined the recognition of a target face presented in the context of other faces of the same or different racial category using a same/different matching task. Caucasian participants were presented during learning with a set of six faces displaying one target face among different numbers of same-race faces. Participants recognized Caucasian targets better when five same-race faces rather than a single same-race face were present in the set, while this effect was absent for Asian targets. Surprisingly, participants recognized Asian targets better in sets with equal numbers of Asian and Caucasian context faces. Similar experiments, but with novel objects, were conducted in which categories were defined by similarity or expertise. These factors did not fully account for the context effects observed with faces. Overall, the results suggest that face recognition and categorization interact but other factors such as task difficulty may also affect face recognition.}, web_url = {http://pec.sagepub.com/content/36/1_suppl.toc}, event_name = {30th European Conference on Visual Perception}, event_place = {Arezzo, Italy}, state = {published}, DOI = {10.1177/03010066070360S101}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 4463, title = {Psychophysics for perception of (in)determinate art}, year = {2007}, month = {7}, pages = {115-122}, abstract = {The question of how humans perceive art and how the sensory percept is endowed with aesthetics by the human brain has continued to fascinate psychologists and artists alike. It seems, for example, rather easy for us to classify a work of art as either "abstract" or "representational".
The artist Robert Pepperell has recently produced a series of paintings that seek to defy this classification: his goal was to convey "indeterminacy" in these paintings - scenes that at first glance look like they contain an object or belong to a certain genre but that upon closer examination escape a definite determination of their contents. Here, we report results from several psychophysical experiments using these artworks as stimuli, which seek to shed light on the perceptual processing of the degree of abstraction in images. More specifically, the task in these experiments was to categorize a briefly shown image as "abstract" or "representational". Stimuli included Pepperell’s paintings, each of which was paired with a similar representational work of art from several periods and several artistic genres. The results provide insights into the visual processes determining our perception of art and can also function as an "objective" validation of the intentions of the artist.}, file_url = {/fileadmin/user_upload/files/publications/apgv07-115_4463[0].pdf}, web_url = {http://www.apgv.org/archive/apgv07/}, editor = {Wallraven, C. , V. Sundstedt}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {4th Symposium on Applied Perception in Graphics and Visualization (APGV 2007)}, event_place = {Tübingen, Germany}, state = {published}, ISBN = {978-1-59593-670-7}, DOI = {10.1145/1272582.1272605}, author = {Wallraven C{walli}{Department Human Perception, Cognition and Action}; Kaulard K{kascot}{Department Human Perception, Cognition and Action}; K\"urner C{cora}{Department Human Perception, Cognition and Action}; Pepperell R; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 4716, title = {Cross modal transfer in face recognition}, year = {2007}, month = {7}, volume = {10}, pages = {66}, abstract = {Prior studies have shown that humans can recognize faces by touch alone but perform poorly in cross-modal face recognition [1]. Here we want to shed further light on haptic face recognition with four experiments using a well-defined stimulus face space based on the morphable MPI-Face-Database. Experiment 1 used a same/different task with sequentially presented faces which established that subjects were able to discriminate faces haptically, using short-term memory. In Experiment 2 we used an old/new recognition task for which different sets of three faces (out of six) were learned haptically with three subsequent haptic test-blocks and one visual test-block. In contrast to Casey and Newell (2007) we used the same printed face masks for recognition in both modalities. We found that participants could recognize faces haptically although recognition accuracy was low (65%) and tended to decrease across blocks. Cross-modal recognition, however, was at chance level (48%). In Experiment 3, we changed the design such that haptic memory was refreshed before each test-block by repeated exposure to the three learned faces. We found that performance increased significantly to 76% and that it became more consistent across blocks. Most importantly, however, we found clear evidence for cross-modal transfer as visual performance rose above chance level (62%). Our results demonstrate that during visual face recognition, participants have access to information learned during haptic exploration, perhaps allowing them to form a visual image from haptic information.
In Experiment 4, we interchanged learning and recognition modality with respect to Experiments 2 and 3, testing within-modality recognition in the visual domain and cross-modal transfer by haptic recognition of the face masks. Using the same experimental design as in Experiment 2, we found that performance in the visual within-modality condition increased significantly to 89% and that it became more consistent across blocks (71% compared to 39% for Experiment 2). However, recognition accuracy decreased across blocks (from 96% to 87%). Interestingly, cross-modal performance was significantly higher than in Experiment 2 (at 69%), demonstrating a clear advantage in cross-modal transfer for vision as the learning modality. The reasons for the observed differences in cross-modal transfer remain to be investigated. Possible factors include differences in visual versus haptic memory permanence, vision as the dominant and therefore preferred learning modality, and finally the role of visual imagery in cross-modal transfer.}, web_url = {http://www.twk.tuebingen.mpg.de/twk07/abstract.php?_load_id=dopjans01}, event_name = {10th Tübinger Wahrnehmungskonferenz (TWK 2007)}, event_place = {Tübingen, Germany}, state = {published}, author = {Dopjans L{ldopjans}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 4882, title = {Looking Down, Looking Up: Does Stature Influence Face Recognition?}, year = {2007}, month = {7}, volume = {10}, pages = {109}, abstract = {In the German population, men are on average 13 cm taller than women [1]. Smaller people, many of them women, look at other faces from below (viewing angle) while tall people look at others from above. The minimal distance between two persons not engaged in mutual gaze is around 50 cm [2]. Thus, with regard to male and female average statures, in close-up situations, the average viewing angle between males and females is around 13 deg. Do people therefore have different “preferred” representations of faces depending on their stature? More specifically, are tall and small people more efficient at processing faces seen “from above” and “from below”, respectively? Furthermore, do observers have different “preferred” representations of male and female faces because men are on average taller than women? To investigate the influence of stature and sex on face recognition, we first investigated whether efficiency in a sex classification task might be influenced by face orientation. To maximize stature differences between participants, we tested two groups: small women (under 165 cm) and tall men (over 180 cm). If face representation is influenced by stature, we expect small women to be more efficient (faster) at processing faces seen from below and vice versa for tall men. Furthermore, because of natural average stature differences between men and women, efficient categorization of male and female faces might depend on their orientation. We used unfamiliar male and female faces shown at pitch angles between -18 deg (looking downward) and +18 deg (looking upward). We tested participants in a speeded sex classification task. Male and female participants saw 220 faces one by one and had to classify them as male or female as fast as possible. Classification accuracy was high (over 95%).
Analysis of reaction times showed no relation between the stature of the observer, the sex of the shown face, and its pitch orientation, suggesting that face processing with regard to sex is not predominantly influenced by the stature of the observer or the sex of the presented face.}, web_url = {http://www.twk.tuebingen.mpg.de/twk07/abstract.php?_load_id=buelthoff01}, event_name = {10th Tübinger Wahrnehmungskonferenz (TWK 2007)}, event_place = {Tübingen, Germany}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Wolf T{towolf}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}} } @Poster{ 4860, title = {Personal Exploratory Experience of an Object Facilitates Its Subsequent Recognition}, year = {2007}, month = {7}, volume = {10}, pages = {103}, abstract = {Current research shows that human object recognition is sensitive to the learned order of familiar object views (e.g. [1]). This temporal order of views could be determined by how an observer manipulates an object during learning, e.g., rigid rotations in depth. In fact, the freedom to manipulate objects during learning is also known to improve subsequent recognition from single static images [2]. In this study, sixteen participants learned novel 3D amoeboid objects by manipulating them in a virtual reality environment. This required the use of a marker tracking system (VICON) and a head-mounted display (z800 3DVisor eMagin). Our participants handled a tracked device whose spatial coordinates, relative to the observers’ viewpoint, determined the position and orientation of a virtual object that was presented via the head-mounted display. Hence, this device acted as a physical substitute for the virtual object and its coordinates were recorded as motion trajectories. In a subsequent old/new recognition test, participants either actively explored or passively viewed old (learned) and new objects in the same setup. Generally, “active” participants performed better than “passive” participants (in terms of sensitivity: d’=1.08 vs. 0.84, respectively). Nonetheless, passive viewing of learned objects that were animated with their learned motion trajectories resulted in comparably good performance (d’=1.13). The performance decrease was specific to passively viewing learned objects that either had their learned motion trajectories temporally reversed (d’=0.69) or followed another observer’s motion trajectories (d’=0.70). Therefore, object recognition performance from passively viewing one’s past explorations of the learned object is comparable to actively exploring the learned object itself. These results provide further support for a dependence on temporal ordering of views during object recognition. Finally, these results could also be considered in the context of studies that highlight the human ability to discriminate one’s own actions from other people’s actions, e.g., hand gestures, handwriting, dart-throwing, full-body walking and ballet (for discussion and examples, see [3]). Here, our study also showed better recognition from viewing videos of self-generated actions. Nonetheless, this recognition benefit was specifically for the learned objects, which were not concretely embodied in the observer’s person. Moreover, animating new objects with the participants’ own actions did not increase their familiarity.
We conclude by suggesting that our observers did not merely show familiarity with their past actions but rather with the idiosyncratic visual experiences that their own actions created.}, file_url = {fileadmin/user_upload/files/publications/TWK-2007-Chuang.pdf}, event_name = {10th Tübinger Wahrnehmungskonferenz (TWK 2007)}, event_place = {Tübingen, Germany}, state = {published}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 4881, title = {Sex Matters When You Ask the Right Question: What Affects Eye Movements in Face Comparison Tasks?}, year = {2007}, month = {7}, volume = {10}, pages = {108}, abstract = {Knowing where people look in a face provides an objective insight into the information entering the visual system and into the cognitive processes involved in face perception. Eye-tracking studies on face perception have mostly investigated observers’ viewing behavior when studying single faces. However, in day-to-day situations, humans also compare faces or match a person’s face to a photograph. During comparison, facial information remains visually accessible, freeing observers from time and encoding constraints [1]. Here, we recorded eye movements of human participants while they compared two faces presented simultaneously. We used (i) two different tasks (discrimination or categorization), and (ii) faces differing either in identity or in sex. In addition, we varied (iii) task difficulty, i.e. the similarity of the two faces in a pair. Eye movements to previously defined areas of interest (AOIs) on the faces were analyzed in terms of frequency, duration and the temporal pattern of fixations made. We found that the eyes were fixated most often in the discrimination tasks (37% of all fixations) but the nose in the categorization task (34.5%), while the total number of fixations increased with task difficulty. Faces differing in sex were more difficult to discriminate than faces differing in identity (63% versus 76% correct responses), which was also reflected in more fixations to face pairs differing in sex (14.4 versus 11.8 fixations per trial). With increasing task difficulty, fixations to only some AOIs increased, in accordance with the literature (more to the eyes in the sex discrimination task and more over all areas in the identity discrimination task; [2]). Unexpectedly, we found a striking effect of tasks on performance measures, as over 80% of participants could detect the more feminine of two faces (categorization task) even at the most similar level, but for the same face pairs their performance in a discrimination task was less than 30% correct. Another interesting finding is that observers mostly compared the inner halves of the two faces of a pair, instead of the corresponding features (e.g., the left eye of the left face with the left eye of the right face). This viewing behavior remained the same in a control experiment where participants’ heads were not fixed.
Quite surprisingly, female participants fixated the eyes of the face stimuli significantly more often than male participants did, but only when the sex of the faces was a relevant feature in the task.}, web_url = {http://www.twk.tuebingen.mpg.de/twk07/abstract.php?_load_id=armann01}, event_name = {10th Tübinger Wahrnehmungskonferenz (TWK 2007)}, event_place = {Tübingen, Germany}, state = {published}, author = {Armann R{armann}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ 4852, title = {Updating of Attention Allocation in Parietal Cortex}, year = {2007}, month = {7}, volume = {10}, pages = {49}, abstract = {Attention determines which aspects of the incoming sensory information are processed with priority. However, attention is seldom an all-or-none process but rather distributed over multiple kinds of incoming information, and this distribution must be updated according to events in the world. Despite its ubiquity, this dynamic updating has been little studied in psychophysics, and even less is known about its neural correlates. In order to investigate attention updating, we studied serial detection of targets in different dimensions (color, shape or motion) of visual stimuli. Performance changed according to target sequence, and could be explained by this simple behavioral model: Each detected target was followed by a discrete attention shift towards the dimension in which the target occurred, leading to a short-lasting, exponentially decaying performance benefit. Continuously changing performance over time reflected the dynamic updating of attention induced by the sequence of detected targets. BOLD signal predicted by this time-course of attention changes was found exclusively in left parietal cortex, suggesting that neural activity in this area directly reflects how world events influence the distribution of attention.}, web_url = {http://www.twk.tuebingen.mpg.de/twk07/abstract.php?_load_id=schultz01}, event_name = {10th Tübinger Wahrnehmungskonferenz (TWK 2007)}, event_place = {Tübingen, Germany}, state = {published}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; Lennert T{lennert}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 4466, title = {In the Eye of the Beholder: Perception of Indeterminate Art}, year = {2007}, month = {6}, pages = {121-128}, abstract = {How do we interpret an object - a scene - a painting? Perception research and art illuminate from different angles how the vast amount of information in our visually perceived environment is processed by the viewer to form a coherent and consistent interpretation of the world. Using drawings and paintings by the artist Robert Pepperell, this work attempts to connect these different world views. Pepperell's paintings at first glance seem to be a baroque fresco, an expressionist still-life, or a cubist collage; taking a closer look, however, this concrete interpretation vanishes and we are left with an indeterminate painting. Using psychophysical experiments and eye-tracking measures, in this work we seek to illuminate the visual processing of information in Pepperell's paintings. More specifically, we will investigate how the pattern of fixations - the loci of interest - changes as a function of the task ("What is depicted in this scene?" vs. "Does this image contain people?") and of the image content.
The interpretation of the experimental results in the context of perceptual research will give first insights into the perception of (indeterminate) art. Conversely, the results are also relevant for art, as they provide a kind of perceptual, measurable "validation" of the artist's intentions.}, file_url = {fileadmin/user_upload/files/publications/CAe-2007-Wallraven.pdf}, web_url = {http://www.eg.org/EG/DL/WS/COMPAESTH/COMPAESTH07}, editor = {Cunningham, D. W., G. W. Meyer, L. Neumann, A. Dunning, R. Paricio}, publisher = {Eurographics Association}, address = {Aire-la-Ville, Switzerland}, booktitle = {Computational Aesthetics 2007}, event_name = {Eurographics Workshop on Computational Aesthetics in Graphics, Visualization and Imaging (CAe '07)}, event_place = {Banff, Alberta, Canada}, state = {published}, ISBN = {978-3-905673-43-2}, DOI = {10.2312/COMPAESTH/COMPAESTH07/121-128}, author = {Wallraven C{walli}{Department Human Perception, Cognition and Action}; Kaulard K{kascot}{Department Human Perception, Cognition and Action}; K\"urner C{cora}{Department Human Perception, Cognition and Action}; Pepperell R; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 4690, title = {Sex matters when you ask the right question: What affects eye movements in face comparison tasks?}, journal = {Journal of Vision}, year = {2007}, month = {6}, volume = {7}, number = {9}, pages = {5}, abstract = {Eye-tracking studies on face perception have mostly investigated observers' eye movement behavior when studying single faces. However, in day-to-day situations, humans also compare faces or try to match a person's face to a photograph. During comparison, facial information remains visually accessible. This frees observers from time and encoding constraints (Galpin & Underwood, 2005). Here, we present eye movement data of participants required to compare two faces that were presented side by side. We used (1) two different tasks (discrimination or categorization), and (2) two types of face stimuli: faces differing either in identity or in sex. In addition, we varied (3) task difficulty, i.e. the similarity of the two faces in a pair. Eye-fixations in predefined facial regions were recorded and analyzed, for example, with regards to their frequency and duration. Our findings reveal, for instance, that the eyes were fixated more often in the discrimination tasks (38% of all fixations) than in the categorization task (29%), while the total number of fixations increased significantly with increasing task difficulty (p < 0.001 in all cases, N=20). Faces differing in sex were more difficult to discriminate than faces differing in identity (63% versus 76% correct responses), which was reflected by increased fixations to face pairs that differed in sex (14.4 versus 11.8 fixations per trial). Unexpectedly, we found a striking effect of tasks on performance measures, as over 80% of participants could detect the more feminine of two faces (categorization task) even at the most similar level, but for the same face pairs their performance in a discrimination task was less than 30% correct.
Viewing behavior of male and female participants differed, but only when the sex of the faces was relevant for the task.}, file_url = {/fileadmin/user_upload/files/publications/Poster_VSS_2007_[0].pdf}, web_url = {http://www.journalofvision.org/7/9/5/}, event_name = {7th Annual Meeting of the Vision Sciences Society (VSS 2007)}, event_place = {Sarasota, FL, USA}, state = {published}, DOI = {10.1167/7.9.5}, author = {Armann R{armann}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ 5036, title = {The role of surface and shape information in the other-race face effect}, journal = {Journal of Vision}, year = {2007}, month = {6}, volume = {7}, number = {9}, pages = {7}, abstract = {Both shape and surface dimensions play an important role in face (e.g. O'Toole et al., 1999) and race recognition (Hill et al., 1995). However, the relative contribution of these cues to other-race (OR) face recognition has not been investigated. Some facial properties may be diagnostic in one race but not in the other (e.g. Valentine, 1991). Observers of different races would rely on facial cues that are diagnostic for their own-race faces, a phenomenon which could partly explain our relative difficulty at recognizing OR faces at the individual level (the so-called other-race effect). Here, we tested this hypothesis by examining the relative role of shape and surface properties in the other-race effect (ORE). For this purpose, we used Asian and Caucasian faces from the MPI face database (Vetter & Blanz, 1999) so that we could vary both shape and surface information, only shape information (in which the surface texture was averaged across individual faces of the same race), or only surface information (in which shape was averaged). The ORE was measured in Asian and Caucasian participants using an old/new recognition task. When faces varied along both shape and surface dimensions, Asians and Caucasians showed a strong ORE (i.e. a better recognition performance for same- than other-race faces). With faces varying along only shape dimensions, the ORE was no longer observed in Asians, but remained present in Caucasians. Finally, when presented with faces varying only along surface dimensions, the ORE was not found for Caucasians whereas it was present in Asians.
These results suggest that the difficulty in recognizing OR faces for Asian observers can be partly due to their inability to discriminate among surface properties of OR faces, whereas the ORE for Caucasian participants would be mainly due to their inability to discriminate among shape cues of OR faces.}, web_url = {http://www.journalofvision.org/7/9/7/}, event_name = {7th Annual Meeting of the Vision Sciences Society (VSS 2007)}, event_place = {Sarasota, FL, USA}, state = {published}, DOI = {10.1167/7.9.7}, author = {Michel C; Rossion B; Hayward W; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Vuong Q{qvuong}{Department Human Perception, Cognition and Action}} } @Poster{ 4872, title = {Classification for visually impaired athletes: An interim report}, journal = {Medicine and Science in Sports and Exercise}, year = {2007}, month = {5}, volume = {39}, number = {5 Supplement}, pages = {265}, web_url = {http://www.acsm-msse.org/pt/re/msse/toc.00005768-200705001-00000.htm;jsessionid=H0cLMJXk6FVLQq5z1J92TRbHKJW195vvzc5hNCMjsNVZsJZMpM6p!-383192544!181195628!8091!-1}, state = {published}, author = {Jendrusch G; Janda S; Kaulard K{kascot}; Bolsinger A; Bach M; Lingelbach B; Platen P} } @Poster{ 4265, title = {Implicit Wiener Series for Estimating Nonlinear Receptive Fields}, journal = {Neuroforum}, year = {2007}, month = {4}, volume = {13}, number = {Supplement}, pages = {1199}, abstract = {The representation of the nonlinear response properties of a neuron by a Wiener series expansion has enjoyed a certain popularity in the past, but its application has been limited to rather low-dimensional and weakly nonlinear systems due to the exponential growth of the number of terms that have to be estimated. A recently developed estimation method [1] utilizes the kernel techniques widely used in the machine learning community to implicitly represent the Wiener series as an element of an abstract dot product space. In contrast to the classical estimation methods for the Wiener series, the estimation complexity of the implicit representation is linear in the input dimensionality and independent of the degree of nonlinearity. From the neural system identification point of view, the proposed estimation method has several advantages: 1. Due to the linear dependence of the estimation complexity on input dimensionality, system identification can also be done for systems acting on high-dimensional inputs such as images or video sequences. 2. Compared to classical cross-correlation techniques (such as spike-triggered average or covariance estimates), similar accuracies can be achieved with a considerably smaller amount of data. 3. The new technique does not need white noise as input, but works for arbitrary classes of input signals such as, e.g., natural image patches. 4. Regularisation concepts from machine learning can be applied to identify systems with noise-contaminated output signals. We present an application of the implicit Wiener series to find the low-dimensional stimulus subspace which accounts for most of the neuron's activity. We approximate the second-order term of a full Wiener series model with a set of parallel cascades consisting of a linear receptive field and a static nonlinearity. This type of approximation is known as a reduced set technique in machine learning.
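To make the idea behind the implicit Wiener series concrete: an n-th degree Wiener/Volterra expansion can be estimated implicitly via regression with an inhomogeneous polynomial kernel, since (1 + x·x')^n spans all monomials of the input up to degree n, so the cost scales with the number of samples rather than with the number of expansion terms. The sketch below uses plain kernel ridge regression as a stand-in and is an illustration of the general technique, not the authors' implementation.

import numpy as np

def fit_poly_kernel_regression(X, y, degree=2, ridge=1e-3):
    # Gram matrix K[i, j] = (1 + x_i . x_j)^degree implicitly represents
    # the full second-order expansion without enumerating its terms.
    K = (1.0 + X @ X.T) ** degree
    return np.linalg.solve(K + ridge * np.eye(len(X)), y)

def predict(X_train, alpha, X_new, degree=2):
    return ((1.0 + X_new @ X_train.T) ** degree) @ alpha

# Toy system: a weakly nonlinear "neuron" driven by 20-dimensional input.
rng = np.random.default_rng(0)
X = rng.standard_normal((200, 20))
y = X[:, 0] - 0.5 * X[:, 1] * X[:, 2] + 0.1 * rng.standard_normal(200)
alpha = fit_poly_kernel_regression(X, y)
print(np.corrcoef(predict(X, alpha, X), y)[0, 1])  # fit quality on toy data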
We compare our results on simulated and physiological datasets to existing identification techniques in terms of prediction performance and accuracy of the obtained subspaces.}, web_url = {http://nwg.glia.mdc-berlin.de/media/pdf/conference/Proceedings-Goettingen2007.pdf}, event_name = {7th Meeting of the German Neuroscience Society, 31st Göttingen Neurobiology Conference}, event_place = {Göttingen, Germany}, state = {published}, author = {Franz MO{mof}{Department Empirical Inference}; Macke JH{jakob}; Saleem A; Schultz SR} } @Conference{ 4256, title = {An active approach to object recognition}, year = {2006}, month = {11}, volume = {7}, pages = {13}, abstract = {In visual object recognition, it is important to understand which object properties are important for learning. Typically, this is done by comparing recognition performance across experimental conditions that manipulate and isolate different aspects of object properties, e.g., distinctive features. However, such an approach requires object properties to be explicitly specified prior to testing and is, hence, limited by the experimenter’s imagination (or the lack thereof). Here, I will present a different approach to studying this problem. Rather than predefine the object properties of interest, participants are free to explore all aspects of a set of novel 3D objects during learning. Raw data are collected on observers’ patterns of exploration and analyses are subsequently applied to understand which object properties are valued by the observers during learning. In my presentation, I will describe the technical apparatus that supports this experimental approach. In addition, I will provide details on how raw data are collected and the methods of post-hoc analyses that can be applied to the data. There are several advantages to this approach in addition to those already mentioned. Firstly, this approach places control in the hands of the observer. Thus, stimulus presentation is determined by the observer’s goals rather than the experimenter’s preconceptions. This results in findings with greater ecological validity. Also, the raw data lend themselves to reanalysis when new methods of analyses are devised or when previously unconsidered object properties later prove to be relevant for object learning. The purpose of this presentation is to generate an open discussion on the merits and disadvantages of this approach to studying visual object recognition.}, event_name = {7th Conference of the Junior Neuroscientists of Tübingen (NeNa 2006)}, event_place = {Oberjoch, Germany}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Inbook{ 3812, title = {The role of familiarity in the recognition of static and dynamic objects}, year = {2006}, month = {10}, pages = {315-325}, abstract = {Although the perception of our world is experienced as effortless, the processes that underlie object recognition in the brain are often difficult to determine. In this article we review the effects of familiarity on the recognition of moving or static objects. In particular, we concentrate on exemplar-level stimuli such as walking humans, unfamiliar objects and faces. We found that the perception of these objects can be affected by their familiarity; for example the learned view of an object or the learned dynamic pattern can influence object perception.
Deviations from the familiar viewpoint, or changes in the temporal pattern of the objects, can result in some reduction of efficiency in the perception of the object. Furthermore, more efficient sex categorization and cross-modal matching were found for familiar than for unfamiliar faces. In sum, we find that our perceptual system is organized around familiar events and that perception is most efficient with these learned events.}, file_url = {/fileadmin/user_upload/files/publications/Visual%20Perception_Part_1_315-325_middle_3812[0].pdf}, web_url = {http://www.elsevier.com/wps/find/bookdescription.cws_home/710077/description#description}, editor = {Martinez-Conde, S. , S. Macknick, L. Martinez, J.-M. Alonso, P. Tse}, publisher = {Elsevier}, address = {Amsterdam, Netherlands}, series = {Progress in Brain Research ; 154A}, booktitle = {Visual Perception Part 1: Fundamentals of vision: Low and Mid-level processes in perception}, state = {published}, ISBN = {978-0-444-52966-4}, DOI = {10.1016/S0079-6123(06)54017-8}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell FN{fiona}{Department Human Perception, Cognition and Action}} } @Poster{ 4093, title = {Neural correlates of attentional modulation induced by trial history}, year = {2006}, month = {10}, volume = {36}, number = {567.17}, abstract = {Temporal patterning of stimuli can affect performance and be critical for perceptual learning. We studied the neural correlates of trial history effects using a task in which detection time was influenced by target history. Using an MRI scanner we measured BOLD signal changes while 12 subjects were presented with streams of stimuli of variable colors, shapes, and motion directions. Participants had to attend to all 3 stimulus dimensions simultaneously to report targets consisting of unpredictable stimulus feature repetitions. Response times for targets in each stimulus dimension decreased exponentially with the number of successive targets and were well explained by a leaky integrator of target history with fast exponential decay (half-life = 1.21 trials). Significant BOLD responses (random-effects analysis over 12 subjects, threshold = p<0.05 corrected for family-wise errors at the cluster level for all reported effects) predicted by theoretical neuronal activity reflecting the leaky integrator output for all stimulus dimensions were found bilaterally in the striatum (putamen, head and body of caudate). In addition, significant BOLD signal increases were observed in response to detected targets of any stimulus dimension in lateral occipital cortex and fusiform gyri bilaterally, with specific responses in regions compatible with area MT for motion targets, with area LO for shape targets and with area V4v for color targets. Our behavioural data show that detected targets induce a benefit in response time for subsequent targets in the same stimulus dimension, and that this acceleration effect is short-lived and can be modelled by a leaky integrator of target history.
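The leaky-integrator account of target history can be written down in a few lines of Python. The half-life below is the reported 1.21 trials; the mapping from integrator state to response time (baseline and gain) is hypothetical and only chosen to produce the qualitative speed-up described above.

HALF_LIFE = 1.21                        # trials, as reported
DECAY = 0.5 ** (1.0 / HALF_LIFE)        # per-trial retention factor

def predicted_rts(targets, baseline=1050.0, gain=330.0):
    # 'targets' holds 1 on trials where a target occurred in the attended
    # dimension, else 0. Each detected target tops up a trace that decays
    # exponentially; a higher trace predicts a faster response.
    trace, rts = 0.0, []
    for t in targets:
        rts.append(baseline - gain * trace)
        trace = DECAY * trace + t       # leaky integration of target history
    return rts

print(predicted_rts([1, 1, 1, 1, 0, 1]))  # RTs drop over successive targets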
Our fMRI data show 1) BOLD signal increases compatible with neural activity reflecting the leaky integrator signals in the striatum, in line with a role of the striatum in guiding motor behaviour in response to sensory cues and 2) BOLD signal increases in extrastriate visual areas in response to detected targets, reflecting immediate sensory consequences of detected targets.}, web_url = {http://www.sfn.org/index.aspx?pagename=abstracts_ampublications}, event_name = {36th Annual Meeting of the Society for Neuroscience (Neuroscience 2006)}, event_place = {Atlanta, GA, USA}, state = {published}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ ConradMS2006, title = {Breaking the stability of perceptual instability: Temporal dynamics of ambiguous figure reversal and interference from distractor patterns}, journal = {Perception}, year = {2006}, month = {8}, volume = {35}, number = {ECVP Abstract Supplement}, pages = {101-102}, abstract = {During continuous viewing of multistable figures, such as the Necker cube, perception alternates between equally valid solutions. So how can perceptual experience be stable given that various alternative interpretations of the same physical stimulus are available? Previous demonstrations with bistable stimuli have revealed that a repetitive intermittent presentation leads to a stabilisation of the percept. Recent research findings suggested that interleaved presentation of several ambiguous stimuli does not disrupt the perceptual stabilisation of each reversible pattern, suggesting that perceptual 'memory stores' coexist independently for each representation. Interference effects were only obtained for structurally similar stimuli. In the present study, we adopted Maier et al.'s interleaved presentation paradigm to investigate the effects of interfering ambiguous patterns upon transition probability and the stabilisation process. Rather than manipulating structural similarities between interleaved ambiguous stimuli, we sequentially presented ambiguous figures that share equivalent reversal processes such as figure - ground segregation or perspective reversal. The results reveal that perceptual dominance time of the ambiguous test stimulus decreases compared to periods during which a blank interval is presented, indicating an effect of interference from the distractor. Interaction between reversal processes influences the stabilisation of perception that is normally observed during repetitive intermittent presentation with blank intervals.}, web_url = {http://pec.sagepub.com/content/35/1_suppl.toc}, event_name = {29th European Conference on Visual Perception}, event_place = {St. Petersburg}, state = {published}, DOI = {10.1177/03010066060350S101}, author = {Conrad V{conrad}{Department Human Perception, Cognition and Action}; McDonald JS{jsm}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}} } @Poster{ BulthoffN2006, title = {Cross-modal interaction can modulate face distinctiveness}, journal = {Perception}, year = {2006}, month = {8}, volume = {35}, number = {ECVP Abstract Supplement}, pages = {204}, abstract = {We had shown that memory for a face can be influenced by the distinctiveness of an utterance to which it has been associated (Bülthoff and Newell, 2004 Perception 33 Supplement, 108).
Furthermore, recognition of a face can be primed by a paired utterance, suggesting that there is a tight, cross-modal coupling between visual and auditory stimuli and that face distinctiveness can be influenced by cross-modal interaction with auditory stimuli like utterances. When instrumental sounds are used instead of utterances, the perceptual quality of auditory stimuli also seemed to affect memory for faces. Here we further investigated whether instrumental sounds can also prime face recognition. Our results show that this is not the case; arbitrary auditory stimuli do not prime recognition of faces. This suggests that utterances are easier to associate closely with faces than arbitrary sounds. We also investigated whether the observed priming effect of utterances might have been based on the use of different first names in each utterance. We repeated the priming experiment using the same utterances, but name information was removed. A significant priming effect was observed. Thus the semantic information related to the first name is not decisive for the priming effect of utterances on face recognition.}, web_url = {http://pec.sagepub.com/content/35/1_suppl.toc}, event_name = {29th European Conference on Visual Perception}, event_place = {St. Petersburg}, state = {published}, DOI = {10.1177/03010066060350S101}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell FN{fiona}{Department Human Perception, Cognition and Action}} } @Poster{ ChuangVTB2006, title = {Familiar form and motion influence perceptual dominance}, journal = {Perception}, year = {2006}, month = {8}, volume = {35}, number = {ECVP Abstract Supplement}, pages = {33}, abstract = {Binocular rivalry can occur when two different stimuli are presented separately to each eye. Typically, the dominant percept alternates between the two presented stimuli. Prior studies have shown that perceptual dominance can be induced by low-level factors such as luminance as well as high-level factors such as object categories, suggesting that rivalry reflects competition at multiple levels of visual processing. Here, we investigated whether learned shape and motion of rigidly rotating objects can bias perceptual dominance during binocular rivalry. Observers first learned four novel objects that each rotated in a specific direction. These objects were randomly created by free-form deformation techniques. Following learning, we induced binocular rivalry between a learned object and a novel distractor. The learned object could rotate in its learned or reversed direction. For comparison purposes, we also included pairs of only novel objects. Initial results show that learned objects rotating in their learned direction are perceptually dominant more often than the paired distractors. Learned objects rotating in reverse do not appear to differ from novel objects in terms of perceived dominance. These findings suggest that binocular rivalry could provide a useful implicit measure of the roles played by shape and motion during object recognition.}, web_url = {http://pec.sagepub.com/content/35/1_suppl.toc}, event_name = {29th European Conference on Visual Perception}, event_place = {St. Petersburg}, state = {published}, DOI = {10.1177/03010066060350S101}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 4226, title = {Human perception and recognition of metric changes of part-based dynamic novel objects}, journal = {Perception}, year = {2006}, month = {8}, volume = {35}, number = {ECVP Abstract Supplement}, pages = {99}, abstract = {The role of object parts is a key issue in object recognition. Here we investigated whether observers encode qualitative (eg straight versus curved part) or metric information of parts (eg curvature magnitude), and whether the information that is encoded can be affected by motion. To address these issues, we constructed a novel set of objects composed of parts that can vary metrically along different dimensions (eg tapering and bending) to create qualitatively different parts. In a same/different matching task, we presented two objects rigidly rotating in the same or different direction, and had observers judge whether these objects were the same or different. We varied the pair of objects along an ‘identity’ axis by morphing between two exemplars. A cumulative Gaussian function explained the effect of morph level, suggesting that observers encoded metric information. There was a slight shift of the psychometric function for same versus different motion. Overall, our results suggest that observers are sensitive to metric information, even for objects with salient part structure. We are currently investigating with fMRI how object parts and motion influence neuronal object processing.}, web_url = {http://pec.sagepub.com/content/35/1_suppl.toc}, event_name = {29th European Conference on Visual Perception}, event_place = {St. Petersburg, Russia}, state = {published}, DOI = {10.1177/03010066060350S101}, author = {Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}; Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ 4092, title = {Attentional modulation by trial history}, journal = {Perception}, year = {2006}, month = {8}, volume = {35}, number = {ECVP Abstract Supplement}, pages = {128}, abstract = {Temporal patterning of stimuli can affect performance and be critical for perceptual learning. We tested whether trial history can explain target detection time even when target occurrence is unpredictable. 12 volunteers were presented with streams of stimuli of variable color, shape, and motion direction, and had to attend to all stimulus dimensions simultaneously to report Poisson-determined, 1-back repetitions in either dimension. Response times decreased exponentially with the number of successive targets (group means for 1 to 4 targets in succession: 1050, 763, 717, 722 milliseconds; 2-way repeated measures ANOVA: F(3,33) = 195, p<<0.0001, no main effect of stimulus dimension but interaction between dimension and number of successive targets: F(6,66) = 5.11, p<0.001).
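For the cumulative-Gaussian fit mentioned in the part-based object study above, a minimal Python sketch using SciPy; the response proportions below are invented placeholders, not the study's data.

import numpy as np
from scipy.optimize import curve_fit
from scipy.stats import norm

def psychometric(x, mu, sigma):
    # Probability of a 'different' response as a function of morph level.
    return norm.cdf(x, loc=mu, scale=sigma)

morph = np.linspace(0.0, 1.0, 7)   # morph level between the two exemplars
p_diff = np.array([0.05, 0.08, 0.20, 0.55, 0.80, 0.93, 0.97])  # invented
(mu, sigma), _ = curve_fit(psychometric, morph, p_diff, p0=[0.5, 0.2])
print(mu, sigma)   # point of subjective equality and slope of the function

A shift of the fitted mu between same- and different-motion conditions would correspond to the slight shift of the psychometric function reported in that abstract.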
Response times were well explained by a leaky integrator of trial history with fast exponential decay (half-life = 1.21 trials; correlation coefficients significant at p<0.0002 for all dimensions and subjects; group mean correlation coefficients for color, shape and motion targets: 0.57(0.03), 0.57(0.02), 0.47(0.03)). Our results show that target detection times can be altered by trial history and are explainable by a fast-decaying integration of trial history. We propose that trial history modulates attention, resulting in response time changes; we are currently investigating this hypothesis using functional neuroimaging.}, web_url = {http://pec.sagepub.com/content/35/1_suppl.toc}, event_name = {29th European Conference on Visual Perception}, event_place = {St. Petersburg, Russia}, state = {published}, DOI = {10.1177/03010066060350S101}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ 4224, title = {Motion from the bottom up: From detection to cognition}, journal = {Perception}, year = {2006}, month = {8}, volume = {35}, number = {ECVP Abstract Supplement}, pages = {69}, abstract = {Motion signals projected onto the retina serve many different yet essential behavioral functions: from quickly detecting objects and segmenting them from background clutter, to effectively navigating through a dynamic environment and recognizing and interacting with objects populating that environment. Not surprisingly, computer scientists, psychologists, cognitive scientists, and neuroscientists alike have actively studied the perception and processing of visual motion. Until recently, the general approach has been to investigate mechanisms of motion perception relevant for specific purposes and typically focused at a specific level of processing, such as stimulus- or cognitively-driven mechanisms. Although this approach has greatly extended our knowledge and appreciation of visual motion processing, it is less clear how motion information relates across these different levels. The purpose of this symposium is to bridge the gap between these levels of visual motion processing and foster discussion between researchers across the various levels.}, web_url = {http://pec.sagepub.com/content/35/1_suppl.toc}, event_name = {29th European Conference on Visual Perception}, event_place = {St. Petersburg, Russia}, state = {published}, DOI = {10.1177/03010066060350S101}, author = {Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Pilz KS{kpilz}{Department Human Perception, Cognition and Action}; Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ 4873, title = {Classification for visually impaired athletes: An interim report}, journal = {Abstracts of the 11th Annual Congress of the European College of Sport Science (ECSS Lausanne 2006)}, year = {2006}, month = {7}, volume = {11}, pages = {267-268}, state = {published}, author = {Janda S; Jendrusch G; Platen P; Bolsinger A; Bach M; Kaulard K{kascot}; Lingelbach B} } @Poster{ 4007, title = {Role of familiar object motion in recognising objects across viewpoints}, journal = {Journal of Vision}, year = {2006}, month = {6}, volume = {6}, number = {6}, pages = {314}, abstract = {Unfamiliar viewpoints can hinder visual object recognition from 2D static images.
Here, we ask whether the same is true when visual input is in the form of dynamic spatio-temporal sequences, such as would accompany object or observer motion. Previous research has shown that such motion can be characteristic of a particular object and hence provide additional cues to identity. In two experiments we demonstrate that learned object motion can facilitate recognition across unfamiliar viewpoints. In each experiment, 24 participants were trained to discriminate between two novel amoeboid-like objects seen from a fixed viewpoint. These objects either deformed nonrigidly (Experiment 1) or rotated rigidly about a horizontal axis (Experiment 2). Both types of motion presented the observer with a coherent sequence of change that had a unique temporal order. After training, participants underwent a 2-interval-forced-choice task that tested their ability to discriminate the two learned objects from two novel objects. At test, objects were presented at 0°, 10°, 20° and 30° around the vertical axis relative to the learned viewpoint, and in the learned or reversed temporal order. The manipulation of temporal order has previously been used to study the contribution of motion to object recognition. In both experiments, accuracy decreased with increasing rotations away from the learned viewpoint and there was a constant benefit for learned object motion across all viewpoints tested (Experiment 1 = 4.9%; Experiment 2 = 5.3%). These results indicate that both rigid and non-rigid motion facilitated object recognition despite disturbances in 2D shape caused by viewpoint changes.}, web_url = {http://www.journalofvision.org/content/6/6/314.short?related-urls=yes&legid=jov;6/6/314}, event_name = {6th Annual Meeting of the Vision Sciences Society (VSS 2006)}, event_place = {Sarasota, FL, USA}, state = {published}, DOI = {10.1167/6.6.314}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 4059, title = {Voices, not arbitrary sounds, prime the recognition of familiar faces}, journal = {Journal of Vision}, year = {2006}, month = {6}, volume = {6}, number = {6}, pages = {10}, abstract = {Our previous studies have shown that memory for a face can be affected by the distinctiveness of a voice to which it had been paired (Bülthoff & Newell, ECVP2004). Moreover, we showed that voices can prime face recognition, suggesting a tight, cross-modal coupling between both types of stimuli. Further investigations, however, seemed to suggest that non-person-related audio stimuli could also affect memory for faces. For example, faces that had been associated with distinctive instrumental sounds were indeed better recognized in an old/new task than faces paired to typical sounds. Here we investigated whether these arbitrary sounds can also prime face recognition. Our results suggest that arbitrary audio stimuli do not prime recognition of faces. This finding suggests that attentional differences may have resulted in better recognition performance for faces paired to distinctive sounds in the explicit old/new task. Voices are easier to associate closely with faces.
We also investigated whether the voice priming effect found earlier might be based on the use of different first names in each audio stimulus, that is, whether the effect was based on semantic rather than perceptual information. We repeated the priming experiment using the same voice stimuli, but name information was removed. The results show that there is still a significant priming effect of voices on faces, albeit weaker than in the full voice experiment. The semantic information related to the first name helps but is not decisive for the priming effect of voices on face recognition.}, web_url = {http://journalofvision.org/6/6/10/}, event_name = {6th Annual Meeting of the Vision Sciences Society (VSS 2006)}, event_place = {Sarasota, FL, USA}, state = {published}, DOI = {10.1167/6.6.10}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell FN{fiona}{Department Human Perception, Cognition and Action}} } @Article{ 3769, title = {Recognising face identity from natural and morphed smiles}, journal = {Quarterly Journal of Experimental Psychology}, year = {2006}, month = {5}, volume = {59}, number = {5}, pages = {801-808}, abstract = {It is easier to identify a degraded familiar face when it is shown moving (smiling, talking; nonrigid motion) than when it is displayed as a static image (Knight & Johnston, 1997; Lander, Christie, & Bruce, 1999). Here we explore the theoretical underpinnings of the moving face recognition advantage. In Experiment 1 we show that the identification of personally familiar faces when shown naturally smiling is significantly better than when the person is shown artificially smiling (morphed motion), as a single static neutral image or as a single static smiling image. In Experiment 2 we demonstrate that speeding up the motion significantly impairs the recognition of identity from natural smiles, but has little effect on morphed smiles. We conclude that the recognition advantage for face motion does not reflect a general benefit for motion, but suggests that, for familiar faces, information about their characteristic motion is stored in memory.}, web_url = {http://www.informaworld.com/smpp/ftinterface~content=a746009644~fulltext=713240930}, state = {published}, DOI = {10.1080/17470210600576136}, author = {Lander K; Chuang L{chuang}; Wickham L} } @Article{ 3770, title = {Recognising novel deforming objects}, journal = {Visual Cognition}, year = {2006}, month = {5}, volume = {14}, number = {1}, pages = {85-88}, abstract = {Current theories of visual object recognition tend to focus on static properties, particularly shape. Nonetheless, visual perception is a dynamic experience, as a result of active observers or moving objects. Here, we investigate whether dynamic information can influence visual object-learning. Three learning experiments were conducted that required participants to learn and subsequently recognize different non-rigid objects that deformed over time. Consistent with previous studies of rigid depth-rotation, our results indicate that human observers do represent object-motion.
Furthermore, our data suggest that dynamic information could compensate when static cues are less reliable, for example as a result of viewpoint variation.}, web_url = {http://www.informaworld.com/smpp/ftinterface~content=a747834181~fulltext=713240930}, state = {published}, DOI = {10.1080/13506280600627756}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 4874, title = {Classification for visually impaired: A stocktaking report and solutions for the future}, journal = {Abstracts of the VISTA 2006 Conference}, year = {2006}, month = {5}, volume = {2006}, pages = {20-21}, web_url = {http://www.paralympic.org/release/Main_Sections_Menu/IPC_Events/Vista_Conference_2006/2006_04_28_Abstract_Booklet.pdf}, state = {published}, author = {Jendrusch G; Bolsinger A; Janda S; Zrenner E; Bach M; Kaulard K{kascot}; Lingelbach B} } @Poster{ 4828, title = {A Model of Theory-Of-Mind Based on Action Prediction}, year = {2006}, month = {3}, volume = {9}, pages = {69}, abstract = {Theory-of-Mind, or mentalising, is defined as a cognitive process used to understand other people’s actions based on mental states. Two main theories of mentalising have been put forward in recent years: Simulation Theory and Theory-Theory. We propose a model of mentalising based on action prediction and semantic representation. The model would be triggered whenever a human observer detects a potential agent (particularly other humans, but also other animals or active entities). On the basis of their actions, it would associate a possible mental state with the observed agent and predict its future behaviour. To do this, first a search engine would look for a potential mental state matching an observed action in a look-up table containing action-mental state associations acquired through experience. Then, a predictor would calculate a possible next action for the observed agent on the basis of the mental state, and a comparator would compare this predicted action to the actual next action of the agent. If the discrepancy between predicted and actual behaviour is greater than a threshold, the mental state is rejected and the process is repeated until a conclusive match is found or the search is abandoned. The predictor is postulated to be similar to mechanisms thought to underlie motor learning or reinforcement learning, while the look-up table could resemble semantic representations of objects or faces. The model could also be used for active interaction with other agents: the search engine would find an action to be executed by the observer in order to induce a particular mental state in the observed agent. Success could be assessed by the model through observation of the other agent’s reaction. The neural correlates for this model are likely to be distributed and could include the posterior part of the superior temporal sulcus, the medial prefrontal cortex, the temporal poles, the premotor cortex and the cerebellum.
To assess the plausibility of the model and test possible associations between particular neural structures and the components of the model, we review previous studies of the neural correlates of mentalising and some associated processes.}, web_url = {http://www.twk.tuebingen.mpg.de/twk06/abstract.php?_load_id=schultz01}, event_name = {9th Tübingen Perception Conference (TWK 2006)}, event_place = {Tübingen, Germany}, state = {published}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; de Vignemont F} } @Poster{ 4829, title = {Face Distinctiveness can be Modulated by Cross-Modal Interaction with Auditory Stimuli}, year = {2006}, month = {3}, volume = {9}, pages = {72}, abstract = {In this study we ask whether visually typical faces can become perceptually distinctive when they are paired with auditory stimuli that are distinctive. In a first set of experiments (Bülthoff & Newell, ECVP 2004), we had investigated the effect of voice distinctiveness on face recognition. Memory for a face can be influenced by the distinctiveness of an utterance to which it has been associated. Furthermore, recognition of a familiar face can be primed by a paired utterance. These findings suggest that there is a tight, cross-modal coupling between the faces presented and the associated utterances and that face distinctiveness can be influenced by cross-modal interaction with auditory stimuli like voices. In another set of experiments, we used instrumental sounds instead of voices and showed that arbitrary auditory stimuli could also affect memory for faces. Faces that had been paired with distinctive instrumental sounds were better recognized in an old/new task than faces paired to typical instrumental sounds. Here we investigated whether these instrumental sounds can also prime face recognition although these auditory stimuli are not naturally associated with faces as voices are. Our results suggest that this is not the case; arbitrary audio stimuli do not prime recognition of faces. This finding suggests that attentional differences may have resulted in better recognition performance for faces paired to distinctive sounds in the old/new task. It also suggests that utterances are easier to associate closely with faces than arbitrary sounds. In a last set of experiments we investigated whether the voice priming effect shown in the first set of experiments might be based on the use of different first names in each utterance. Thus, we asked whether semantic rather than perceptual information was decisive in the utterances used. We repeated the priming experiment using the same voice stimuli, but name information was removed. The results show that there is still a significant priming effect of voices on faces, albeit weaker than in the full voice experiment. The semantic information related to the first name helps but is not decisive for the priming effect of voices on face recognition.}, web_url = {http://www.twk.tuebingen.mpg.de/twk06/abstract.php?_load_id=buelthoff01}, event_name = {9th Tübingen Perception Conference (TWK 2006)}, event_place = {Tübingen, Germany}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell FN{fiona}{Department Human Perception, Cognition and Action}} } @Poster{ 4839, title = {Recognising Dynamic Objects Across Viewpoints}, year = {2006}, month = {3}, volume = {9}, pages = {118}, abstract = {Recognizing objects across viewpoints presents the visual system with an extremely challenging task.
This would be particularly true if learned representations were solely determined by spatial properties. However, a number of recent studies have shown that observers are also highly sensitive to characteristic object motion. Could the availability of characteristic spatial-temporal patterns in the natural environment help explain the ability to generalise across viewpoints? Here, we examined how familiar object motion (both rigid and nonrigid) improves object recognition across different viewpoints. In both experiments, participants were first familiarised with two novel dynamic objects from a fixed viewpoint. These objects presented the observer with a coherent sequence of change that had a unique temporal order, resulting from either rotating a rigid object about the horizontal axis (Experiment 1) or through a characteristic deformation of a nonrigid object (Experiment 2). Subsequently, participants were tested for their ability to discriminate these learned objects from new distractors using a 2-interval-forced-choice task. During test, objects were presented at 0°, 10°, 20° and 30° around the vertical axis relative to the learned viewpoint, and in the learned or reversed temporal order. Motion reversal is a common manipulation used to disrupt spatiotemporal properties without interfering with the object’s spatial characteristics. In both experiments, accuracy decreased with increasing rotation away from the learned viewpoint. Nonetheless, objects were consistently better recognised when presented in the learned motion sequence (mean accuracy: Expt 1 = 86%; Expt 2 = 81%) compared to the reverse motion condition (mean accuracy: Expt 1 = 81%; Expt 2 = 76%), across all viewpoints tested (Expt 1: F(1,23)=13.94, p<0.01; Expt 2: F(1,23)=8.78, p<0.01). These results indicate that both rigid and non-rigid motion facilitated object recognition despite disturbances in 2D shape caused by viewpoint changes.}, file_url = {fileadmin/user_upload/files/publications/TWK-Chuang.pdf}, event_name = {9th Tübingen Perception Conference (TWK 2006)}, event_place = {Tübingen, Germany}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 4847, title = {The Visual System's Representation of Natural Images}, year = {2006}, month = {3}, volume = {9}, pages = {172}, abstract = {Previous studies (Atick and Redlich, Field, Webster and Miyahara) have investigated how the visual system could optimally represent the 1/f amplitude spectrum of natural images. Computational studies (Atick and Redlich, Field) suggest that the cortical representation ought to be a “whitened” version of the amplitude spectrum of natural images, i.e. spatial frequencies are equally represented despite the abundance of low spatial frequencies and dearth of high spatial frequencies in photographs of real world scenes. Webster and Miyahara showed that adaptation to natural images attenuates sensitivity to low spatial frequencies, effectively supporting the computational evidence. We attempt to measure to what degree different spatial frequencies contribute to the percept of an image, in order to determine the extent of whitening of the input.
To do this, we adapted subjects briefly (250 ms) to textures (4 x 4 degrees) of different spatial frequencies (1, 2, 4, 8, 16 cycles/degree; bandwidth 1.4 octaves, full width at half maximum). Then we measured the perceived contrast of 1/f textures in the adapted region of the visual field using the following procedure: After each interval of adaptation subjects judged whether the texture in the adapted region had a higher or lower contrast than that of the same texture in a non-adapted region. The contrast of the comparator texture (non-adapted) was changed after each judgement according to a 1-up-1-down staircase. We found that attenuation of perceived contrast, due to adaptation, is greatest when the adapting frequencies are at the peak of the contrast sensitivity function. It seems there is some “whitening”; however, this is, at best, incomplete.}, web_url = {http://www.twk.tuebingen.mpg.de/twk06/abstract.php?_load_id=mcdonald01}, event_name = {9th Tübingen Perception Conference (TWK 2006)}, event_place = {Tübingen, Germany}, state = {published}, author = {McDonald JS{jsm}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}} } @Conference{ 3815, title = {Investigating face recognition with voices and face morphs}, year = {2006}, month = {1}, abstract = {Humans can easily identify faces at the individual level although faces belong to a class of objects with high similarity between exemplars. Characterizing conditions under which faces are more easily recognized allows us to better understand the mechanisms underlying face recognition. Numerous studies have shown that distinctive faces are better recognized than typical faces. Those results have implications for the mental representation of faces. In a set of experiments we tested cross-modal effects of distinctiveness. More specifically, we asked whether distinctive voices can improve memory for otherwise typical faces. Our results suggest that the quality of information in one modality, i.e., audition, can affect recognition in another modality, i.e., vision, thus showing that face distinctiveness can be of a multi-modal nature. Because we encounter faces of only two sexes but recognize faces of innumerable different identities, it is often implicitly assumed that sex classification is an easier task than identification. We investigated how sensitive we are to variations of identity-related features or sex-related features of highly familiar faces. The results suggest that while extracting and processing sex-related information from a face is a comparatively easy task, we do not seem to retain sex-related facial information in memory as accurately as identity-related information.
These results have implications for models of face representation and face processing.}, event_name = {Face Mini-Symposium: Georg-August-Universität Göttingen, Zentrum für Neurobiologie des Verhaltens}, event_place = {Göttingen, Germany}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Article{ 4871, title = {Optimierung der Klassifizierung im Blinden- und Sehbehindertensport}, journal = {BISp-Jahrbuch Forschungsförderung}, year = {2006}, volume = {2005/06}, pages = {83-88}, abstract = {As a first stocktaking exercise, visual acuity test results and ophthalmological findings, as well as classification results collected before and during international competitions for the visually impaired (Paralympics, world championships, etc.), were evaluated. Differences in the visual performance profiles (visual acuity, visual field) and in the ophthalmological findings across the different starting classes (B1-B3) and sports were analysed. Since the classification of visual impairments has so far been largely acuity-based, but the procedure used to determine visual acuity (the S.O.S.H. Low Vision Chart (S.O.S.H. = Student Optometric Service to Humanity); cf. the eye chart in Fig. 1, left) does not conform to the European standard EN ISO 8596, an attempt was made at the International German Athletics Championships in Berlin (2005) and at the qualification round for the German Torball Championship in Kassel (2006) to trial more modern, computer-assisted (automated) and standard-conforming procedures for determining visual acuity. Since, besides visual acuity, contrast vision, dynamic vision and peripheral vision are of great importance in sport (Mester, 1988; Tidow, 1996; Jendrusch, 1995; Jendrusch & Brach, 2003) but have so far been left out of the classification, further newly developed procedures for diagnosing visual performance were also tested.}, web_url = {http://www.bisp.de/cln_050/nn_113306/SharedDocs/Downloads/Publikationen/Jahrbuch/Jb__200506__Artikel/Heck,templateId=raw,property=publicationFile.pdf/Heck.pdf}, state = {published}, author = {Jendrusch G; Bolsinger A; Janda S; Bach M; Kaulard K{kascot}; Lingelbach B; Heck H} } @Inbook{ 3351, title = {Objektwahrnehmung}, year = {2006}, pages = {165-172}, file_url = {/fileadmin/user_upload/files/publications/Handbuch_der_allgemeinen_Psychologie_165-172_middle_3351[0].pdf}, web_url = {https://www.hogrefe.de/shop/handbuch-der-allgemeinen-psychologie-kognition-65550.html}, editor = {Funke, J., P. A. Frensch}, publisher = {Hogrefe}, address = {Göttingen, Germany}, series = {Handbuch der Psychologie ; 5}, booktitle = {Handbuch der Allgemeinen Psychologie: Kognition}, state = {published}, ISBN = {978-3-8017-1846-6}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ 3771, title = {Recognising novel deforming objects}, year = {2005}, month = {11}, day = {9}, pages = {3}, abstract = {Current theories of visual object recognition tend to focus on static properties, particularly shape. Nonetheless, visual perception is a dynamic experience, as a result of active observers or moving objects. Here, we investigate whether dynamic information can influence visual object-learning. Three learning experiments were conducted that required participants to learn and subsequently recognize different non-rigid objects that deformed over time.
Consistent with previous studies of rigid depth-rotation, our results indicate that human observers do represent object-motion. Furthermore, our data suggest that dynamic information could compensate when static cues are less reliable, for example as a result of viewpoint variation.}, web_url = {http://www.opam.net/archive/opam2005/OPAM05Abstracts.pdf}, event_name = {13th Annual Workshop on Object Perception, Attention, and Memory (OPAM 2005)}, event_place = {Toronto, Canada}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 3811, title = {A model of Theory-of-Mind based on action prediction}, year = {2005}, month = {11}, volume = {35}, number = {877.14}, abstract = {Theory-of-Mind, or mentalising, is defined as a cognitive process used to understand other people's actions based on mental states. Two main theories of mentalising have been much discussed in recent years: Simulation Theory (e.g. Goldman 1993; Gallese and Goldman 1998) and Theory-Theory (e.g. Gopnik 1993). Models derived from these ideas and integrating neuroscience findings have since been proposed (e.g. Frith and Frith 1999; Blakemore and Decety 2001; Wolpert, Doya and Kawato 2003). Here we use the main idea from one of these models (Wolpert et al 2003) to propose a tentative model of mentalising based on action prediction and semantic representation. We also review a few neuroimaging studies of the processes involved in the model.}, file_url = {/fileadmin/user_upload/files/publications/SfN2005poster_[0].pdf}, web_url = {http://www.sfn.org/index.aspx?pagename=neuroscienceQuarterly_05summer_ns2005preview}, event_name = {35th Annual Meeting of the Society for Neuroscience (Neuroscience 2005)}, event_place = {Washington, DC, USA}, state = {published}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; de Vignemont F} } @Techreport{ 3637, title = {Categorical perception of gender: No evidence for unfamiliar faces}, year = {2005}, month = {10}, number = {094}, abstract = {We investigated whether male and female faces are discrete categories at the perceptual level. We created artificial gender continua between male and female faces using a 3D-morphing algorithm and used classical categorization and discrimination tasks to investigate categorical perception of gender. In Experiments 1 and 3, 3D morphs were computed between male and female faces. The results of the discrimination task suggest that the gender of unfamiliar faces is not categorically perceived. When participants were familiarized with the male and female endpoint faces before testing (Experiment 3), a categorical effect was found. In Experiment 2, only shape or texture of unfamiliar 3D morphs was indicative of gender, while other information (e.g. texture or shape) was kept constant. Again there was no evidence of a categorical effect in the discrimination task. In Experiments 1, 2 and 3, changes in the gender of a face were also coupled with changes in identity, which may have confounded the findings. In Experiments 4 and 5, we used face continua in which only the gender of the facial features changed, while the identity characteristics of the facial features remained constant.
When the faces were unfamiliar (Experiment 4), there was no evidence of categorical perception of gender. In Experiment 5, participants learned to classify the face images in two gender categories using a feedback procedure. A clear categorical effect for gender was present after training. Our findings suggest that despite the importance of faces, gender information present in faces is not naturally perceived categorically. Consequently, participants showed categorical perception of gender only after training with the face stimulus set.}, file_url = {/fileadmin/user_upload/files/publications/techreport_094_[0].pdf}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell FN{fiona}{Department Human Perception, Cognition and Action}} } @Conference{ LanderC2005, title = {Recognizing Face Identity from Natural and Morphed Smiles}, year = {2005}, month = {9}, day = {3}, pages = {72}, abstract = {People find it easier to recognise the identity of a familiar face in non-optimum viewing conditions when it is moving (smiling, talking) than when it is shown as a static image. Here we explore the theoretical underpinnings of the moving face recognition advantage. Specifically, we compare the identification of personally familiar faces from natural smile sequences, artificial smile sequences (dynamic morphing), single static neutral images and single static smiling images. Results showed that recognition was best when the face was viewed naturally smiling. A further experiment investigated the impact of motion tempo on the recognition of morphed familiar faces. Results indicate a significant interaction between the naturalness of the motion and the speed of the observed motion. We conclude that the recognition advantage for face motion does not reflect a general benefit for motion, but instead suggests that, for familiar faces, information about their characteristic motion is stored in memory.}, web_url = {http://escop.eu/site_media/uploads/14th.pdf}, event_name = {14th Bi-Annual Meeting of the European Society for Cognitive Psychology (ESCOP 2005)}, event_place = {Leiden, The Netherlands}, state = {published}, author = {Lander K; Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ 3590, title = {Accuracy in face recognition: Better performance for face identification with changes in identity and caricature but not with changes in sex}, journal = {Journal of Vision}, year = {2005}, month = {9}, volume = {5}, number = {8}, pages = {379}, abstract = {Because we encounter faces of only two sexes but recognize faces of innumerable different identities, it is often implicitly assumed that sex determination is easier than identification in face recognition. Many studies support this assumption. For example, we are very accurate at telling the sex of unfamiliar faces in photographs (Bruce et al., 1993. Perception, 22, 131-152) and sex categorization is performed more rapidly, on average, than familiarity or identity decisions (Bruyer, Galvez, & Prairial, 1993. British Journal of Psychology, 84, 433-441). The question that we investigated here is how sensitive we are to variations of identity-related features or sex-related features in familiar faces. 38 participants had to pick out the veridical faces of ten familiar work colleagues from amongst distractor faces that were variations of the original faces. Distractor faces varied either in identity, caricature or sex.
In the identity face sets, distractor faces were various morphs between the original face and two unfamiliar faces. In the caricature face sets, distractors were various caricatures of the original face. Finally, in the sex face sets, distractor faces were various feminized and masculinized versions of the original face. Participants were most accurate at identifying the original face amongst distractors in the identity sets. They had a tendency to choose positive caricatures over the original faces in caricature sets. However, participants were very poor at finding the original faces in the sex sets. The results suggest that while extracting and processing sex-related information from a face is a comparatively easy task, we do not seem to retain sex-related facial information in memory as accurately as identity-related information. These results have implications for models of face representation and face processing.}, web_url = {http://journalofvision.org/5/8/379/}, event_name = {Fifth Annual Meeting of the Vision Sciences Society (VSS 2005)}, event_place = {Sarasota, FL, USA}, state = {published}, DOI = {10.1167/5.8.379}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell F{fiona}{Department Human Perception, Cognition and Action}} } @Conference{ Chuang2005, title = {Why use Line Drawings?}, year = {2005}, month = {9}, volume = {6}, pages = {8}, abstract = {Studies in the field of visual object recognition generally report observed human performance with 2D still images, e.g. photographs and line drawings. One of the main reasons for doing so stems from the ready availability of such stimuli for experimentation (for example, see http://www.cog.brown.edu/~tarr/projects/databank.html). Human visual perception, however, is a dynamic process - as the result of either an active observer or a moving target, the visual experience is rarely static. Hence, it is important to question whether such findings realistically portray daily human behavior. Recent experiments using dynamic stimuli have shown that human performance can differ as a result of introducing natural motion information to the studied object; for example, there is a recognition benefit when faces are seen moving (e.g., O'Toole et al., 2002). Such evidence clearly suggests that object motion plays a non-trivial role in visual recognition. Nonetheless, there are challenges - both technical and experimental - that a researcher ought to consider when using dynamic stimuli. Here, I will discuss some of these issues as well as the steps that were adopted, in my research, to overcome them. In particular, I will describe how different types of dynamic stimuli could be generated for various experiments in novel object and face learning, as well as some of the software and hardware available for this undertaking. In addition, I will briefly discuss how such stimuli could be presented in psychophysical experiments, so as to control for possible artifacts, e.g. timing errors.}, web_url = {http://www.neuroschool-tuebingen-nena.de/index.php?id=284}, event_name = {6.
Neurowissenschaftliche Nachwuchskonferenz Tübingen (NeNa '05)}, event_place = {Blaubeuren, Germany}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 3450, title = {Recognizing novel deforming objects}, year = {2005}, month = {8}, pages = {158}, abstract = {Human visual recognition can be improved with object motion (e.g., faces, Lander and Chuang, 2005; rigid objects, Vuong and Tarr, 2004). This improvement suggests that it is not merely shape information that characterizes an object. Rather, human observers may also represent how shape changes over time for recognition.}, file_url = {/fileadmin/user_upload/files/publications/pdf3450.pdf}, web_url = {http://portal.acm.org/citation.cfm?id=1080438}, editor = {Bülthoff, H.H., T. Troscianko}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {2nd Symposium on Applied Perception in Graphics and Visualization (APGV 2005)}, event_place = {La Coruña, Spain}, state = {published}, ISBN = {1-59593-139-2}, DOI = {10.1145/1080402.1080438}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ SchultzFWF2005, title = {Activation in superior temporal sulcus parallels a parameter inducing the percept of animacy}, journal = {Perception}, year = {2005}, month = {8}, volume = {34}, number = {ECVP Abstract Supplement}, pages = {62}, abstract = {An essential, evolutionarily stable feature of brain function is the detection of animate entities, and one of the main cues to identify them is their movement. We developed a model of a simple interaction between two objects, in which we could control the percept of animacy by varying one parameter. The two disk-like objects moved along separate random trajectories but were also influenced by each other's positions, such that one object followed the other in a parametrically controlled fashion. An increase of the correlation between the objects' movements varied the amount of interactivity and animacy observers attributed to them. Control animations differed from the experimental animations only in terms of the interactivity level, not in terms of object speed and separation. Twelve observers lying in a magnetic-resonance-imaging scanner had to rate the amount of interactivity and the overall speed of the objects in separate, subsequent tasks. Behavioural results showed a significant difference in interactivity ratings between experimental and control stimuli, but no difference in speed ratings, as expected. There was no response-time difference between the tasks. The fMRI data revealed that activation in the posterior superior temporal sulcus and gyrus (pSTS/pSTG) increased in relation to the degree of correlated motion between the two objects. This activation increase was not different when subjects performed an explicit or implicit task while observing these interacting objects. These data suggest that the pSTS and pSTG play a role in the automatic identification of animate entities, by responding directly to an objective movement characteristic inducing the percept of animacy, such as the amount of interactivity between two moving objects.
These findings are consistent with literature showing that, in monkey and human, pSTS and pSTG respond to stimuli displaying biological motion.}, web_url = {http://pec.sagepub.com/content/34/1_suppl.toc}, event_name = {28th European Conference on Visual Perception}, event_place = {A Coruña, Spain}, state = {published}, DOI = {10.1177/03010066050340S101}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; Friston KJ; Wolpert DM; Frith CD} } @Poster{ 4004, title = {Sequence selectivity of form transformation in visual object recognition}, journal = {Perception}, year = {2005}, month = {8}, volume = {34}, number = {ECVP Abstract Supplement}, pages = {130}, abstract = {Object motion, e.g. depth-rotation, provides visual information that might be useful for the reconstruction of an object's 3-D structure, hence increasing the recognition likelihood of any given moving object. Our aim is to demonstrate that object motion can, in itself, serve as an independent cue to object identity without particular recourse to form-retrieval processes. In this study, we used novel amoeboid objects that transformed nonrigidly over time. Two experiments are reported on the learnt recognition of such stimuli. During an initial study phase, participants learnt to identify these objects. At test, participants were either presented with an old/new recognition task (experiment 1) or with a two-alternative forced-choice task (experiment 2). Here, learnt stimuli were presented in either the studied sequence of shape transformations, or the reverse order. Although the shapes shown were the same in both instances, the overall findings indicate that participants performed significantly better in recognising the learnt objects when the same shapes were presented in the learnt sequence than when they were presented in reverse sequence. If object motion facilitated recognition of the stimulus solely by contributing to the recovery of its form, the sequence of non-rigid transformations would not be relevant to its representation. Nonetheless, these findings suggest that human observers do not merely remember a visual object as a collection of different shapes. Instead, observers are also sensitive to how these shapes transform over time.}, web_url = {http://pec.sagepub.com/content/34/1_suppl.toc}, event_name = {28th European Conference on Visual Perception}, event_place = {A Coruña, Spain}, state = {published}, DOI = {10.1177/03010066050340S101}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}} } @Conference{ 3772, title = {Motion matters: learning dynamic objects}, year = {2005}, month = {8}, abstract = {Previous research has typically focused on static properties of objects. Recently, there has been a growing interest in the role that dynamic information might play in the perception and representation of objects. In this talk we approach this issue by describing how the visual system utilises dynamic information in learning two different classes of visual objects: i) novel deforming stimuli, ii) faces. Object-learning experiments with novel objects show that human observers are sensitive to the motion characteristics of the learned objects. In addition, preliminary results also suggest that learned motion characteristics can reduce the detrimental effects of changing the studied viewpoint.
Using faces, we explored how encoding of identity is affected by two different types of facial movements: non-rigid facial motion, and looming facial motion. Using a delayed visual search paradigm, we could show that faces learned in motion were found more quickly and more accurately than faces learned from static snapshots. In summary, results from our lab suggest that the visual system uses dynamic information to encode and subsequently recognize new object/face identities.}, event_name = {Sensational Seminar Series, School of Psychology, Cardiff University}, event_place = {Cardiff, UK}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ 3813, title = {Shape perception for object recognition and face categorization}, journal = {Perception}, year = {2005}, month = {8}, volume = {34}, number = {ECVP Abstract Supplement}, pages = {21}, abstract = {Even though shape is the basis of object recognition, there is still an ongoing debate about how it is perceived and represented in the brain. An important question is how various visual cues, like disparity and texture, are integrated into a unique shape percept. Different visual information has also been shown to play an ancillary role in shape perception. For example, cast shadows can help disambiguate shape perception (Kersten et al, 1996 Nature 379 31) while 2D retinal motion information can help organize dots into meaningful shapes despite incongruent depth information (Bülthoff et al, 1998 Nature Neuroscience 1 254 - 257). Shape perception is also important for object categorization. For example, faces varying in shape and texture may be perceptually grouped into different categories (a phenomenon known as categorical perception). Previous studies have shown that faces varying in expressions, identity or race are perceived categorically (e.g. Levin & Angelone, 2002 Perception 31 567 - 578). We did not find a similar effect for faces varying in masculinity/femininity (Bülthoff & Newell, 2004 Visual Cognition 11 823 - 855). This difference in perception for sex and identity is supported by new studies showing a lack of sensitivity to sex changes in familiar faces, while changes in identity are easily noticed. These results have implications for the nature of shape representations of faces in the brain.}, web_url = {http://pec.sagepub.com/content/34/1_suppl.toc}, event_name = {28th European Conference on Visual Perception}, event_place = {A Coruña, Spain}, state = {published}, DOI = {10.1177/03010066050340S101}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Article{ 2929, title = {Why are moving faces easier to recognize?}, journal = {Visual Cognition}, year = {2005}, month = {4}, volume = {12}, number = {3}, pages = {429-442}, abstract = {Previous work has suggested that seeing a famous face move aids the recognition of identity, especially when viewing conditions are degraded (Knight & Johnston, 1997; Lander, Christie, & Bruce, 1999). Experiment 1 investigated whether the beneficial effects of motion are related to a particular type of facial motion (expressing, talking, or rigid motion). Results showed a significant beneficial effect of both expressive and talking movements, but no advantage for rigid motion, compared with a single static image. Experiment 2 investigated whether the advantage for motion is uniform across identity. Participants rated moving famous faces for distinctiveness of motion.
The famous faces (moving and static freeze frame) were then used as stimuli in a recognition task. The advantage for face motion was significant only when the motion displayed was distinctive. The results suggest that one reason why moving faces are easier to recognize is that some familiar faces have characteristic motion patterns, which act as an additional cue to identity.}, file_url = {/fileadmin/user_upload/files/publications/pdf2929.pdf}, web_url = {http://www.informaworld.com/smpp/6084610-9322645/ftinterface~content=a713734696~fulltext=713240930~frm=content}, state = {published}, DOI = {10.1080/13506280444000382}, author = {Lander K; Chuang L{chuang}} } @Conference{ 3773, title = {Recognising Flubber: Role of motion in visual object recognition}, year = {2005}, month = {3}, event_name = {Brainstorming Colloquium, Department of Psychology, University of Manchester}, event_place = {Manchester, UK}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton I{ian}{Department Human Perception, Cognition and Action}} } @Article{ 3063, title = {Activation in posterior superior temporal sulcus parallels parameter inducing the percept of animacy}, journal = {Neuron}, year = {2005}, month = {2}, volume = {45}, number = {4}, pages = {625-635}, abstract = {An essential, evolutionarily stable feature of brain function is the detection of animate entities, and one of the main cues to identify them is their movement. We developed a model of a simple interaction between two objects, in which an increase of the correlation between their movements varied the amount of interactivity and animacy observers attributed to them. Functional magnetic resonance imaging revealed that activation in the posterior superior temporal sulcus and gyrus (pSTS / pSTG) increased in relation to the degree of correlated motion between the two objects. This activation increase was not different when subjects performed an explicit or implicit task while observing these interacting objects. These data suggest that the pSTS and pSTG play a role in the automatic identification of animate entities, by responding directly to an objective movement characteristic inducing the percept of animacy, such as the amount of interactivity between two moving objects.}, file_url = {/fileadmin/user_upload/files/publications/pdf3063.pdf}, state = {published}, DOI = {10.1016/j.neuron.2004.12.052}, author = {Schultz J{johannes}; Friston K; Wolpert D; Frith C} } @Poster{ 5075, title = {Automatic Classification of Plankton from Digital Images}, journal = {ASLO Aquatic Sciences Meeting}, year = {2005}, month = {2}, volume = {1}, pages = {1}, state = {published}, author = {Sieracki M; Riseman E; Balch W; Benfield M; Hanson A; Pilskaln C; Schultz H; Sieracki C; Utgoff P; Blaschko M{blaschko}; Holness G; Mattar M; Lisin D; Tupper B} } @Poster{ 3301, title = {Sensitivity to changes in identity, caricature and sex in face recognition}, year = {2005}, month = {2}, volume = {8}, pages = {124}, abstract = {It is known that we are quite accurate at judging the sex of unfamiliar faces [1]. Furthermore, sex categorization is performed more rapidly, on average, than familiarity or identity decisions [2].
In one of our recent studies on face perception with unfamiliar faces [3], we were surprised to find that discrimination performance was much lower for faces differing in sex-related features than for faces whose features were morphed between two identities. Here, we investigated whether this observation also holds for familiar faces. The motivation for this series of experiments was to find out whether memory for familiar faces shows similar differences, that is, whether participants are less accurate at remembering the specific femininity or masculinity of a well-known face than at remembering identity-related changes of facial features. Participants had to identify the veridical faces of familiar work colleagues among ten distractor faces that were morphing variations of the original faces. Distractor faces varied either in identity, caricature or sex. In the identity face sets, distractor faces were morphs between the original face and unfamiliar faces mixed in different proportions. In the caricature face sets, distractors were different caricatures of the original face. Finally, in the sex face sets, distractor faces were different feminized and masculinized versions of the veridical face. Participants performed best when the original face was presented among identity distractors. They had a tendency to choose feature-enhancing caricatures over the original faces in caricature sets. Participants were very poor at finding the original faces in the sex sets. Generally, our findings with unfamiliar faces show that sex-related changes in facial features are less obvious to observers than identity-related changes. Furthermore, our study on familiar faces suggests that we do not retain sex-related facial information in memory as accurately as identity-related information. These results have important implications for models of face recognition and how facial features are represented in the brain.}, web_url = {http://www.twk.tuebingen.mpg.de/twk05/programm.php}, editor = {Bülthoff, H. H., H. A. Mallot, R. Ulrich and F. A. Wichmann}, event_name = {8th Tübingen Perception Conference (TWK 2005)}, event_place = {Tübingen, Germany}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ 5073, title = {Automatic In Situ Identification of Plankton}, year = {2005}, month = {1}, pages = {79}, event_name = {Seventh IEEE Workshops on Application of Computer Vision (WACV/MOTIONS'05)}, event_place = {Breckenridge, CO, USA}, state = {published}, author = {Blaschko MB{blaschko}; Holness G; Mattar MA; Lisin D; Utgoff PE; Hanson AR; Schultz H; Riseman EM; Sieracki ME; Balch WM; Tupper B} } @Article{ 3062, title = {Activation of the human superior temporal gyrus during observation of goal attribution by intentional objects}, journal = {Journal of Cognitive Neuroscience}, year = {2004}, month = {12}, volume = {16}, number = {10}, pages = {1695-1705}, abstract = {Previous functional imaging experiments in humans showed activation increases in the posterior superior temporal gyrus and sulcus during observation of geometrical shapes whose movements appear intentional or goal-directed. We modeled a chase scenario between two objects, in which the chasing object used different strategies to reach the target object: the chaser either followed the target’s path or appeared to predict its end position. Activation in the superior temporal gyrus of human observers was greater when the chaser adopted a predict rather than a follow strategy.
Attending to the chaser’s strategy induced slightly greater activation in the left superior temporal gyrus than attending to the outcome of the chase. These data implicate the superior temporal gyrus in the identification of objects displaying complex goal-directed motion.}, file_url = {/fileadmin/user_upload/files/publications/pdf3062.pdf}, state = {published}, DOI = {10.1162/0898929042947874}, author = {Schultz J{johannes}; Imamizu H; Kawato M; Frith C} } @Article{ 2385, title = {Categorical perception of sex occurs in familiar but not unfamiliar faces.}, journal = {Visual Cognition}, year = {2004}, month = {10}, volume = {11}, number = {7}, pages = {823-855}, abstract = {We investigated whether male and female faces are discrete categories at the perceptual level and whether familiarization plays a role in the categorical perception of sex. We created artificial sex continua between male and female faces using a 3‐D morphing algorithm and used classical categorization and discrimination tasks to investigate categorical perception of sex. In Experiments 1 and 2, 3‐D morphs were computed between individual male and female faces. In Experiments 3 and 4, we used face continua in which only the sex of the facial features changed, while the identity characteristics of the facial features remained constant. When the faces were unfamiliar (Experiments 1 and 3), we failed to find evidence for categorical perception of sex. In Experiments 2 and 4, we familiarized participants with the individual face images by instructing participants to learn the names of the individuals in the endpoint face images (Experiment 2) or to classify face images along a continuum as male or female using a feedback procedure (Experiment 4). In both these experiments we found evidence for a categorical effect for sex after familiarization. Our findings suggest that despite the importance of face perception in our everyday world, sex information present in faces is not naturally perceived categorically. Categorical perception of sex was only found after training with the face stimulus set. Our findings have implications for functional models of face processing which suggest two independent processing routes, one for facial expression and one for identity: We propose that sex perception is closely linked with the processing of facial identity.}, file_url = {/fileadmin/user_upload/files/publications/categorical_perception_of_sex_occurs_in_familiar_but_not_infamiliar_faces_2385[0].pdf}, web_url = {http://www.tandfonline.com/doi/abs/10.1080/13506280444000012}, state = {published}, DOI = {10.1080/13506280444000012}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell F{fiona}{Department Human Perception, Cognition and Action}} } @Article{ 3064, title = {Conscious will in the absence of ghosts, hypnotists, and other people}, journal = {Behavioural and Brain Sciences}, year = {2004}, month = {10}, volume = {27}, number = {5}, pages = {674-675}, abstract = {We suggest that certain experiences reported by patients with schizophrenia show that priority, consistency, and exclusivity are not sufficient for the experience of willing an action. 
Furthermore, we argue that even if priority, consistency, and exclusivity cause the experience of being the author of an action, this does not mean that conscious will is an illusion.}, file_url = {/fileadmin/user_upload/files/publications/PrecisWegnerCommentBBS2004_3064[0].pdf}, web_url = {http://journals.cambridge.org/production/action/cjoGetFulltext?fulltextid=287794}, state = {published}, author = {Schultz J{johannes}; Sebanz N; Frith C} } @Poster{ 3188, title = {Activity in posterior superior temporal gyrus correlates inversely with kinematic information during observation of human actions}, year = {2004}, month = {10}, volume = {34}, number = {664.1}, abstract = {The cortex surrounding the posterior superior temporal sulcus of humans and monkeys is known to be activated during observation of biological movements, including human actions (1,2). In our event-related fMRI experiment, 12 healthy human volunteers were asked to discriminate between 2 versions of four different human actions on the basis of their movement kinematics. The difficulty of the task was influenced by the number of joints showing differences between the two movement versions. Clusters in the posterior superior temporal sulcus region in both hemispheres were the only brain regions whose activity varied inversely with the number of joints with significant differences between the two movement versions (clusters identified by SPM RFX analysis with 12 subjects thresholded at p<0.001 uncorrected, correlation with activation in left STS: R² = 0.96, right STS: R² = 0.94). Activity in the cluster identified in the right posterior superior temporal sulcus also showed a trend to correlate positively with participants’ performance (non-significant, p=0.13). These results suggest that the cortex surrounding the posterior superior temporal sulcus participates in the extraction of kinematic information from observed biological movements, with activity increasing with task difficulty.}, web_url = {http://www.sfn.org/absarchive/}, event_name = {34th Annual Meeting of the Society for Neuroscience (Neuroscience 2004)}, event_place = {San Diego, CA, USA}, state = {published}, author = {Schultz J{johannes}; Ingram JN; Wolpert DM; Frith CD} } @Poster{ 3067, title = {Interactions between audition and vision for face recognition}, journal = {Perception}, year = {2004}, month = {9}, volume = {33}, number = {ECVP 2004 Abstract Supplement}, pages = {108}, abstract = {We can recognise distinctive faces more easily than typical ones. We investigated whether this distinctiveness effect appears for visually typical faces when these faces have been associated with features that are distinctive in another sensory modality. Participants first learned a set of unfamiliar faces. During learning, half of these faces were paired with distinctive auditory stimuli and half with typical stimuli. In experiment 1, the auditory stimuli were voices. We found that recognition performance in a visual recognition test was significantly (p < 0.005) better for faces that had been paired with distinctive voices. In experiment 2, we tested whether voice information improved face recognition directly by association or whether distinctiveness effects were due to enhanced attention during learning. In a priming experiment, participants recognised a face significantly faster (p < 0.05) when this face was preceded by its congruent voice. Thus the quality of auditory information can affect recognition in another modality like vision.
In experiment 3, the stimuli consisted of non-speech sounds. In this experiment, we tested whether voices and faces represent a special case of cross-modal memory enhancement or whether this distinctiveness effect also occurs with more arbitrary associations. Recognition performance in a visual recognition test suggests that a similar effect is present.}, web_url = {http://pec.sagepub.com/content/33/1_suppl.toc}, event_name = {27th European Conference on Visual Perception}, event_place = {Budapest, Hungary}, state = {published}, DOI = {10.1068/ecvp04a}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell FN{fiona}{Department Human Perception, Cognition and Action}} } @Conference{ 3066, title = {Distinctive auditory information improves visual face recognition}, journal = {Journal of Vision}, year = {2004}, month = {8}, volume = {4}, number = {8}, pages = {139}, abstract = {Face recognition studies have shown that distinctiveness can improve recognition. Distinctiveness effects have also been found in stimuli other than faces, suggesting that it is a general mechanism. Here we tested cross-modal effects of distinctiveness and asked whether distinctive voices can improve memory for otherwise typical faces. In all experiments, participants first learned a set of static, unfamiliar faces. During learning, half of these faces were paired with distinctive voices and half were paired with typical voices. Face stimuli were counterbalanced across these voice conditions. In Experiment 1 we found that recognition performance in a visual recognition test was significantly (p}, web_url = {http://journalofvision.org/4/8/139/}, event_name = {Fourth Annual Meeting of the Vision Sciences Society (VSS 2004)}, event_place = {Sarasota, FL, USA}, state = {published}, DOI = {10.1167/4.8.139}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell FN{fiona}{Department Human Perception, Cognition and Action}} } @Poster{ 3189, title = {Attention effects on superior temporal sulcus and gyrus activation during observation of intentional objects}, journal = {NeuroImage}, year = {2004}, month = {6}, volume = {22}, number = {Supplement 1}, pages = {e2122}, abstract = {Background Behavioural studies of children and adults show that goal-directedness is an important cue for the attribution of animacy to elements of the environment. In monkey and human, the cortex surrounding the superior temporal sulcus (STS) and gyrus (STG) is known to respond to biological motion and to intentional actions [1], and could thus participate in the detection of animate entities [2]. A growing number of neuroimaging studies also indicate that the STS appears to be involved in the representation of mental states. We reasoned that if the STS and STG are sensitive to intentional motion, activation in these structures would vary with the amount of perceived goal-directed motion. Further, if the STS is involved in the attribution of mental states, this structure might respond more when an observed goal-directed behaviour is directed by a representation of the target’s goal rather than a representation of the target’s position. Also, previous studies revealed attention effects on activation of the STS during processing of biological motion and of socially relevant characteristics of a face. Which level of processing in the STS is affected by attention is not yet clear.
Methods We devised two fMRI experiments to study the response of the STS and of other brain regions to goal-directedness and the role of attention in this process. We presented healthy adult volunteers with animations of two round shapes moving in a seemingly animate way. In experiment 1, we parametrically varied the amount of goal-directed motion of the two abstract moving objects. Subjects had to rate either the amount of interaction between the moving objects or their speed, which was manipulated independently. In experiment 2, we manipulated the strategy used to reach the goal: agents either seemed to use knowledge of the goals attributed to the target object or to follow the target object. Stimuli were controlled for speed and quantity of movement, and eye movements were monitored in experiment 1. Results Increase in goal-directed behaviour parametrically increased activation in STS/STG and also in the medial occipital cortex and fusiform gyrus, even when subjects performed an incidental task. In experiment 2, watching agents trying to reach targets by using knowledge about the goals of the target object increased activation in the STS/STG, but only when subjects paid attention to the chaser’s strategy and not to the outcome of the chase. Conclusion We conclude that 1) the cortex in the STS/STG region responds to goal-directed behaviour independently of the task performed by the subject, and 2) it responds more when a chase appears to be directed by a representation of the target’s goal rather than the target’s position, but only when subjects explicitly look for strategies of the chaser. This suggests that only higher levels of processing of socially relevant characteristics are under the influence of attention.}, web_url = {http://www.sciencedirect.com/science/article/pii/S1053811905700197}, event_name = {Tenth Annual Meeting of the Organization for Human Brain Mapping (HBM 2004)}, event_place = {Budapest, Hungary}, state = {published}, DOI = {10.1016/S1053-8119(05)70019-7}, author = {Schultz J{johannes}; Friston K; Imamizu H; Frith C} } @Article{ 3061, title = {Dissociable roles of ventral and dorsal striatum in instrumental conditioning}, journal = {Science}, year = {2004}, month = {4}, volume = {304}, number = {5669}, pages = {452-454}, abstract = {Instrumental conditioning studies how animals and humans choose actions appropriate to the affective structure of an environment. According to recent reinforcement learning models, two distinct components are involved: a "critic," which learns to predict future reward, and an "actor," which maintains information about the rewarding outcomes of actions to enable better ones to be chosen more frequently. We scanned human participants with functional magnetic resonance imaging while they engaged in instrumental conditioning. Our results suggest partly dissociable contributions of the ventral and dorsal striatum, with the former corresponding to the critic and the latter corresponding to the actor.}, web_url = {http://www.sciencemag.org/content/304/5669/452.full.pdf}, state = {published}, DOI = {10.1126/science.1094285}, author = {O'Doherty J; Dayan P; Schultz J{johannes}; Deichmann R; Friston K; Dolan RJ} } @Poster{ 2631, title = {Haptic Magnitude Estimates of Size for Graspable Shapes}, year = {2004}, month = {2}, volume = {7}, pages = {122}, abstract = {Studies of visual size perception with the method of magnitude estimation have shown a linear relationship between actual sizes and magnitude estimates [1]. 
Similar studies for touch do not yield unequivocal evidence for a linear relationship; in some cases, a positively accelerated power function best described the relationship between stimulus sizes and estimates [2]. We have investigated haptic magnitude estimation for length in two haptic experiments with different methods of haptic exploration (whole hand, finger span). The haptic stimuli consisted of 15 rectangular shapes. The only difference from one shape to another was the length of the horizontal side, which ranged from 40 mm to 68 mm in equal intervals. For all shapes, the depth and height were 10 mm and 40 mm, respectively. In the Multiple cues Experiment, blindfolded participants used their dominant hand to feel each shape freely. The shape was presented fixed flat onto a support, so they could feel the entire shape under their hand. The participants' task was to give a modulus-free magnitude estimate for the horizontal side. All shapes were presented once in random order in each block. In the Single cue Experiment, blindfolded participants were restricted to grasping the horizontal side of a shape between the thumb and index finger of their dominant hand. Their task was to give a magnitude estimate for the length of that side. Magnitude estimates for side length could be fitted by a two-parameter linear function with a high goodness-of-fit statistic in both experiments (R² ≈ .97). Thus, when participants were given a size range of 40 to 68 mm, their magnitude estimates increased linearly with each physical increment, independently of the exploration method used. Because of the small range of total size variation present in the shape set, we do not conclude from our results that haptic magnitude estimation of unidimensional size is generally linear. It should be noted that the present linear functions had a negative y-intercept and that when a power function was fit to the data, the exponent was greater than 1.0 in both experiments, and goodness-of-fit was also high. Our results suggest, however, that haptic perception of size can safely be considered linear within this small part of the size continuum. These results are important for considering further psychophysical studies with shapes within this size range.}, web_url = {http://www.twk.tuebingen.mpg.de/twk04/index.php}, event_name = {7th Tübingen Perception Conference (TWK 2004)}, event_place = {Tübingen, Germany}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Klatzky RL{bobby}; Newell FN{fiona}{Department Human Perception, Cognition and Action}} } @Conference{ 2932, title = {The importance of motion for learning and recognising faces.}, year = {2004}, month = {1}, event_name = {76th Vision Seminar}, event_place = {ATR Laboratories, Japan}, state = {published}, author = {Lander K; Chuang L{chuang}; Bruce V} } @Thesis{ 3065, title = {Influence of goals on observation of actions: functional neuroimaging studies}, year = {2004}, file_url = {fileadmin/user_upload/files/publications/Schultz-Johannes-PhD.pdf}, state = {published}, type = {PhD}, author = {Schultz JWR{johannes}} } @Poster{ 2630, title = {Interaction between vision and audition in face recognition}, journal = {Abstracts of the Psychonomic Society}, year = {2003}, month = {11}, volume = {8}, pages = {57}, abstract = {Face studies have shown that distinctive faces are more easily recognized than typical faces in memory tasks. 
We investigated whether a cross-modal interaction between auditory and visual stimuli exists for face distinctiveness. During training, participants were presented with faces from two sets. In one set all faces were accompanied by characteristic auditory stimuli (d-faces). In the other set, all faces were accompanied by typical auditory stimuli (s-faces). Face stimuli were counterbalanced across auditory conditions. We measured recognition performance in an old/new recognition task. Face recognition alone was tested. Our results show that participants were significantly better (t(12) = 3.89, p < 0.005) at recognizing d-faces than s-faces in the test session. These results show that there is an interaction between different sensory inputs and that typicality of stimuli in one modality can be modified by concomitantly presented stimuli in other sensory modalities.}, web_url = {http://c.ymcdn.com/sites/www.psychonomic.org/resource/resmgr/Annual_Meeting/Past_and_Future_Meetings/Abstracts03.pdf}, event_name = {44th Annual Meeting of The Psychonomic Society}, event_place = {Vancouver, Canada}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell FN{fiona}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 3190, title = {STS/STG region responds parametrically to goal-directedness during observation of abstract agents}, year = {2003}, month = {11}, volume = {33}, number = {86.16}, abstract = {Goal-directed behaviour is an important cue for the attribution of animacy to elements of the environment, as has been repeatedly shown in behavioural studies of children and adults. In monkey and human, the superior temporal sulcus (STS) and gyrus (STG) are known to respond to stimuli displaying biological motion, and could thus participate in the detection of living entities. As goal-directedness is important for the attribution of animacy, we expected STS and STG to respond when objects appearing animate try to reach a goal. As the STS also appears to be involved in tasks involving mentalizing, it might also be sensitive to the way the agents reach their goal. In two fMRI experiments, we presented healthy adult volunteers with two agents (interacting, round shapes moving in a seemingly animate way) and varied the goal-directedness in their behaviour. In exp. 1, we parametrically increased the goal-directedness in the interaction of the agents, and in exp. 2 we varied the strategy used to reach the goal: agents either seemed to rely on mentalizing or only on physical cues. Stimuli were controlled for speed and quantity of movement. Increase in goal-directed behaviour parametrically increased activation in STS and STG, even when subjects performed an incidental task. In exp. 2, watching agents using a mentalizing strategy increased activation in the STS and STG; this was reduced when subjects performed an incidental task. We conclude that 1) the STS / STG region responds to goal-directed behaviour independently of the task performed by the subject, and 2) it responds more when the goal-directed behaviour is apparently relying on mentalizing. 
This second activation increase seems to be significant only when subjects explicitly look for mental states in the observed behaviour.}, web_url = {http://www.sfn.org/index.aspx?pagename=annualmeeting_futureandpast}, event_name = {33rd Annual Meeting of the Society for Neuroscience (Neuroscience 2003)}, event_place = {New Orleans, LA, USA}, state = {published}, author = {Schultz J{johannes}; Friston KJ; Imamizu H; Frith CD} } @Poster{ BulthoffN2003_2, title = {Interaction between vision and speech in face recognition}, journal = {Journal of Vision}, year = {2003}, month = {10}, volume = {3}, number = {9}, pages = {825}, abstract = {Many face studies have shown that in memory tasks, distinctive faces are more easily recognized than typical faces. All these studies were performed with visual information only. We investigated whether a cross-modal interaction between auditory and visual stimuli exists for face distinctiveness. Our experimental question was: Can visually typical faces become perceptually distinctive when they are accompanied by voice stimuli that are distinctive? In a training session, participants were presented with faces from two sets. In one set all faces were accompanied by characteristic auditory stimuli during learning (d-faces: different languages, intonations, accents, etc.). In the other set, all faces were accompanied by typical auditory stimuli during learning (s-faces: same words, same language). Face stimuli were counterbalanced across auditory conditions. We measured recognition performance in an old/new recognition task. Face recognition alone was tested. Our results show that participants were significantly better (t(12) = 3.89, p < 0.005) at recognizing d-faces than s-faces in the test session. These results show that there is an interaction between different sensory inputs and that typicality of stimuli in one modality can be modified by concomitantly presented stimuli in other sensory modalities.}, web_url = {http://www.journalofvision.org/content/3/9/825.abstract}, event_name = {Third Annual Meeting of the Vision Sciences Society (VSS 2003)}, event_place = {Sarasota, FL, USA}, state = {published}, DOI = {10.1167/3.9.825}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell FN{fiona}{Department Human Perception, Cognition and Action}} } @Conference{ 2935, title = {The role of motion in learning new faces.}, year = {2003}, month = {9}, event_name = {European Conference on Cognitive Psychology}, event_place = {Granada, Spain}, state = {published}, author = {Lander K; Chuang L{chuang}; Bruce V} } @Conference{ 2934, title = {What aspects of facial motion are beneficial for recognition?}, year = {2003}, month = {7}, event_name = {12th International Conference on Perception and Action}, event_place = {Perth, Australia}, state = {published}, author = {Lander K; Chuang L{chuang}} } @Poster{ 4692, title = {Lightness Constancy: Shades are compensated in perception, scattering light not}, year = {2003}, month = {6}, volume = {29}, pages = {1017}, abstract = {Luminance of three-dimensional achromatic objects under given illumination depends on remittance of the material, shading and scattering light (mutual illumination of opposed surfaces). We studied perception of lightness of six flat surfaces, three exposed to direct illumination, three in the shade in opposite position, thus allowing mutual illumination of neighboring pairs. Six achromatic cardboards forming a logarithmic scale of remittance (step factor = 1.2) were used. 
Direct illumination increased the luminance of the cardboards by a factor of two with respect to those in the shade. The luminance sequence of the cardboards varied with their positions in light and shade. Altogether there are 720 permutations of the sequence of the six cardboards. For any of the 720 permutations selected for the experiments, the subjects tried to report the lightness sequence correctly in spite of the different luminance sequences. The difference between the empirical lightness sequence reported by the subjects and the physically measured sequence according to remittance was taken as the measure of lightness constancy. We used Kendall's rank correlation coefficient τ, which is τR = 1 if the two sequences above are identical. We also calculated τL from the empirical and the luminance sequences. If the subjects were unable to perceive the lightness, they would report the luminance sequence with τL = 1 and τR << 1. Tests with 13 subjects on 30 different permutations yielded τR > τL (with the exception of control experiments in which the sequences according to lightness and luminance happened to be physically the same, correctly yielding τR = τL = 1). Thus, lightness constancy of three-dimensional objects under asymmetric illumination, as reported for more than 100 years (summary e.g. in A. L. Gilchrist: Lightness, Brightness, and Transparency, 1994), has been confirmed quantitatively with our novel method. In addition, a quantitative result concerning mutual illumination can be derived from our data. Mutual illumination of cardboards can change the luminance sequence as a consequence of neighborhood. From a light cardboard under direct illumination, more light is scattered to and reflected by its dark neighbor in the shaded position than from the same cardboards in exchanged positions. We calculated the degree of lightness constancy separately for the cases in which the luminance sequence was not changed by mutual illumination, and for those in which the difference from the lightness sequence was increased or decreased. Separate calculations for these groups revealed that the effect of local scattered light makes it more difficult to detect the correct lightness. Mutual illuminations increasing the difficulty resulted in less constancy (τR smaller), whereas mutual illumination resulting in greater similarity of the lightness and luminance sequences (thus reducing the difficulty) yielded better constancy (τR greater). The effect of light scattered locally at three-dimensional objects is not taken into account in the process leading to lightness constancy of perception.}, web_url = {http://nwg.glia.mdc-berlin.de/media/pdf/conference/proceedings2003.pdf}, editor = {Elsner, N. , H. 
Zimmermann}, event_name = {5th Meeting of the German Neuroscience Society, 29th Göttingen Neurobiology Conference}, event_place = {Göttingen, Germany}, state = {published}, author = {Armann R{armann}; Seelmann C; Schramme J} } @Poster{ 3191, title = {Detection of interacting objects by the human brain}, journal = {Journal of Cognitive Neuroscience}, year = {2003}, month = {4}, volume = {15}, number = {Supplement}, pages = {189}, web_url = {http://cogneurosociety.org/annual-meeting/previous-meetings/2003_abstracts_edit.xls/view}, event_name = {10th Annual Meeting of the Cognitive Neuroscience Society}, event_place = {New York, NY, USA}, state = {published}, author = {Schultz J{johannes}; Friston K; Wolpert D; Frith CD} } @Poster{ BulthoffN2003, title = {Cross-modal Aspect of Face Distinctiveness}, year = {2003}, month = {2}, volume = {6}, pages = {147}, abstract = {Various factors have been identified that influence face recognition. Despite the diversity of the studies on face recognition, mostly factors related to visual information have been investigated so far. Among factors like facial motion, orientation and illumination, the distinctiveness of faces has been extensively studied. It is well known that distinctive faces are more easily recognized than typical faces in memory tasks. In our study we have addressed the question whether factors that are not of visual nature might also influence face recognition. More specifically, our experimental question was: can visually typical faces become perceptually distinctive when they are accompanied by voice stimuli that are distinctive, and can these faces therefore become more easily recognizable? In a training session, participants saw faces from two sets. In one set all faces were accompanied by characteristic auditory stimuli during learning (d-faces: different languages, intonations, accents, etc.). In the other set, all faces were accompanied by typical auditory stimuli during learning (s-faces: same words, same language). Face stimuli were counterbalanced across auditory conditions. Face recognition alone was tested. We measured recognition performance in an old/new recognition task. Our results show that participants were significantly better (t(12) = 3.89, p < 0.005) at recognizing d-faces than s-faces in the test session. Thus, our results demonstrate that the perceptual quality of auditory stimuli (distinctive or typical) presented simultaneously with face stimuli can modify face recognition performance in a subsequent memory task and that typicality of stimuli in one modality can be modified by concomitantly presented stimuli in other sensory modalities.}, web_url = {http://www.twk.tuebingen.mpg.de/twk03/}, event_name = {6. Tübinger Wahrnehmungskonferenz (TWK 2003)}, event_place = {Tübingen, Germany}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell FN} } @Inbook{ 1132, title = {Image-Based Recognition of Biological Motion, Scenes, and Objects}, year = {2003}, pages = {146-172}, abstract = {In this chapter we will review experiments using both explicit and implicit tasks to investigate object recognition using familiar objects (faces), unusual renderings of familiar objects (point-light walker), and novel scenes. While it is unlikely that participants would have already seen the particular renderings of familiar objects used in an experiment, they have definitely seen similar objects. 
For this reason, unfamiliar objects are used in many experiments to circumvent the problem of uncontrolled variations in prior exposure to objects. Another reason for using unfamiliar objects is that they allow us precise control over the types of features that are available for discrimination. How our visual system represents familiar and unfamiliar three-dimensional objects for the purpose of recognition is a difficult and passionately discussed issue. At the theoretical level, a key question that any representational scheme has to address is how much the internal model depends on the viewing parameters. We will present two types of models regarding this issue and also address the question of whether the recognition process is more analytic or more holistic.}, web_url = {http://psycnet.apa.org/psycinfo/2003-88086-006}, editor = {Peterson, M.A. , G. Rhodes}, publisher = {Oxford University Press}, address = {New York, NY, USA}, booktitle = {Perception of Faces, Objects, and Scenes: Analytic and Holistic Processes}, state = {published}, ISBN = {0-19-516538-1}, DOI = {10.1093/acprof:oso/9780195313659.003.0007}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ Bulthoff2002, title = {No categorical perception of face gender found with different discrimination tasks}, journal = {Journal of Vision}, year = {2002}, month = {11}, volume = {2}, number = {7}, pages = {620}, abstract = {Faces are easily categorized as male or female. But is this categorization done at the perceptual level? In previous studies (ECVP 2001), we found no categorical perception of gender for face stimuli using two discrimination tasks: either a simultaneous same-different task or delayed matching-to-sample. This conflicts with results of another study using a different task (Campanella et al, Visual Cognition, 2001). Here we tested whether categorical perception of gender might become apparent if we used a discrimination task (sequential same-different task) more similar to that used by Campanella et al. We employed the same type of stimuli as in our previous experiments. The face stimuli were created by generating series of morphs between pairs of male and female 3D faces (gender continua). We also generated a gender continuum based on an average face. While gender-related information was present in this latter continuum, the stimuli lacked individual characteristic facial features that might induce identity-related categorical perception. If male and female faces belong to perceptually distinct gender categories, we would expect that two faces that straddle the gender boundary are more easily discriminated than two faces that belong to the same gender category. In our previous experiments we never found any evidence of categorical perception for unfamiliar faces. Our present results confirm these findings. We found no evidence that participants could discriminate more easily between faces that straddle the gender boundary. Thus no categorical effect for face gender was revealed when a sequential same-different discrimination task was used. 
The conflicting results obtained by both studies do not appear to be due to the different discrimination tasks employed.}, web_url = {http://www.journalofvision.org/content/2/7/620.abstract}, event_name = {Second Annual Meeting of the Vision Sciences Society (VSS 2002)}, event_place = {Sarasota, FL, USA}, state = {published}, DOI = {10.1167/2.7.620}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ 3192, title = {Neural correlates of Mimed and Real action perception}, journal = {NeuroImage}, year = {2002}, month = {6}, volume = {16}, number = {2 Supplement 1}, pages = {1386}, abstract = {When a person manipulates an object, this object influences the movement dynamics of the arm holding it. Performing an action while manipulating an object or miming the same action thus results in slightly different movements of the arm. We wanted to find which brain areas were activated when observers had to observe and categorise arm movements as "Mimed" or "Real" actions. Subjects had to base their decisions on the arm motion, as an object could be present or absent in the animation irrespective of the movement type.}, web_url = {http://www.idac.tohoku.ac.jp/HBM2002/index.html}, event_name = {8th International Conference on Functional Mapping of the Human Brain (HBM 2002)}, event_place = {Sendai, Japan}, state = {published}, DOI = {10.1016/S1053-8119(02)90015-7}, author = {Schultz J{johannes}; Wolpert D; Frith C} } @Poster{ 1131, title = {Face gender is not perceived categorically}, year = {2002}, month = {2}, volume = {5}, pages = {84}, abstract = {In previous studies, we investigated whether male and female faces are perceived as distinct categories at the perceptual level and found no evidence of categorical perception using various discrimination tasks. In the present study we tested whether categorical perception of our stimuli might become apparent with yet another discrimination task, a sequential same-different task. The face stimuli used in all our experiments were derived from a database of 200 3D laser scans of male and female faces (http://faces.kyb.tuebingen.mpg.de). Series of 3D morphs were computed between individual male and female faces using the method of Blanz & Vetter (1999). Additionally, all faces of the database were used to compute average male and female faces to generate another series of morphs which was devoid of any individual features. One prediction of categorical perception is that two face stimuli that belong to different gender categories should be easier to discriminate than two face stimuli belonging to the same gender. In all our studies, including the present one, most face pairs that straddle the gender category were not more easily discriminated than same-category pairs. Thus, despite the use of different discrimination tasks, we found no categorical effect for face gender with our face stimuli, even when exemplar-specific effects are eliminated, as is the case with average faces. We will discuss these results and compare them to the conflicting results of Campanella et al. (2001), who carried out similar experiments with different morphing techniques.}, web_url = {http://www.twk.tuebingen.mpg.de/twk02/}, event_name = {5. 
Tübinger Wahrnehmungskonferenz (TWK 2002)}, event_place = {Tübingen, Germany}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ 1111, title = {Recognizing faces across different views: does caricaturing help?}, year = {2002}, month = {2}, volume = {5}, pages = {83}, abstract = {Caricatured faces are recognized as quickly and accurately as (and sometimes faster and better than) the veridical versions (Benson & Perrett, 1994). This “caricature effect” (CE) has been demonstrated only for the frontal view of faces and only when the caricatures were presented during the testing phase. First, we investigated whether the caricature effect generalizes across changes in viewpoint (frontal, three-quarter, and profile). Second, we examined the effect of presenting caricatured faces during the learning phase, which we term the “reverse caricature effect” (RCE). Face recognition performance was tested using two tasks: an old/new recognition paradigm and a sequential matching task. Observers learned faces either in the frontal, three-quarter, or profile views, and were tested with all three viewpoints. Half of the subjects participated in the CE condition (learning with veridicals, testing with caricatures) and the other half of the subjects participated in the RCE condition (learning with caricatures, testing with veridicals). The caricatures were created using a 3D face morphing algorithm (Blanz & Vetter, 1999). Accuracy sensitivity was measured using d’. For the CE condition, caricatures were recognized more accurately than veridical versions of the same face (mean d’: sequential matching: caricature = 1.15, veridical = 1.09; Old/New: caricature = 1.42, veridical = 1.18). This difference was (nearly) significant (sequential matching: F(2,58) = 28, p < 0.001; Old/New: F(1,162) = 3.19, p = 0.076). The interaction between face caricature level and viewpoint (testing view and/or learning view) was not significant. This suggests that the caricature effect generalizes across viewpoint. Similar results were found for the RCE condition. These results are discussed within the framework of a face space model for representing faces.}, file_url = {/fileadmin/user_upload/files/publications/pdf1111.pdf}, web_url = {http://www.twk.tuebingen.mpg.de/twk02/}, event_name = {5. Tübinger Wahrnehmungskonferenz (TWK 2002)}, event_place = {Tübingen, Germany}, state = {published}, author = {Knappmeyer B{babsy}{Department Human Perception, Cognition and Action}; Tappe C; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ 1133, title = {Gender, average heads and categorical perception}, journal = {Journal of Vision}, year = {2001}, month = {12}, volume = {1}, number = {3}, pages = {281}, abstract = {Background: Our visual system uses a sophisticated mechanism called categorical perception to discriminate between highly similar objects. Small perceptual differences are enhanced, thus creating clear boundaries between groups of items. Purpose: Although it seems to be an easy task to classify people by gender, we wondered whether facial information was sufficient for this purpose. Using the morphing technique of Blanz and Vetter (1999) we built an average three-dimensional head model from a database of 200 laser-scanned faces. We constructed an artificial gender continuum of this average head and used the faces in categorization and discrimination experiments. 
Results: Gender information was present in our face set and was easily identified by the participants. However, when we tested for the existence of a categorical effect, we found no evidence of enhanced discrimination for faces straddling the gender category boundary. In previous studies we also found no evidence of categorical perception when using faces of individuals (Buelthoff & Newell, 2000). Our results with average faces confirm the previous findings while avoiding any personal distinctive features that might interfere with the analysis. Furthermore, the use of average faces ensures that the endpoint faces are situated at approximately equal distances from the gender boundary. Conclusion: The absence of a categorical effect is surprising. Categorical perception has been shown repeatedly for other information displayed by faces (expressions and identity). Although we can tell the sex of a face quite reliably, there is no evidence of a distorted perceptual space for face gender. Furthermore, our results show that categorical perception does not always exist when similar items are categorized, not even for an important category like faces. Clearly, despite its enormous importance for social interactions, we have not learned to deal with the gender of faces very effectively.}, file_url = {/fileadmin/user_upload/files/publications/pdf1133.pdf}, web_url = {http://journalofvision.org/1/3/281}, event_name = {First Annual Meeting of the Vision Sciences Society (VSS 2001)}, event_place = {Sarasota, FL, USA}, state = {published}, DOI = {10.1167/1.3.281}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell FN{fiona}{Department Human Perception, Cognition and Action}} } @Poster{ 1134, title = {Average faces and gender categories: no evidence of categorical perception}, journal = {Perception}, year = {2001}, month = {8}, volume = {30}, number = {ECVP Abstract Supplement}, pages = {54}, abstract = {Categorical perception is a sophisticated mechanism which allows our visual system to discriminate between highly similar objects. Perceptually, physical differences between groups of objects are enhanced as compared to equal-sized differences within a group of objects, thus creating clear boundaries between groups of items. Humans are expert in face recognition. Does a categorical perception mechanism help us to differentiate between male and female faces? Using a three-dimensional morphing technique, we built an average.}, web_url = {http://pec.sagepub.com/content/30/1_suppl/1.full.pdf+html}, event_name = {Twenty-fourth European Conference on Visual Perception}, event_place = {Kuşadasi, Turkey}, state = {published}, DOI = {10.1177/03010066010300S101}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell FN{fiona}{Department Human Perception, Cognition and Action}} } @Poster{ 3193, title = {Cortical regions associated with the sense of justice and legal rules}, journal = {NeuroImage}, year = {2001}, month = {6}, volume = {13}, number = {6:Supplement}, pages = {S473}, abstract = {In law theory, an important distinction is made between legal reasoning and a natural, intuitive sense of justice. 
We designed a simple word-based experiment to investigate with fMRI whether the neural correlates of making law-based or justice-based decisions are different.}, web_url = {http://www.sciencedirect.com/science?_ob=MiamiImageURL&_cid=272508&_user=29041&_pii=S1053811901918166&_check=y&_origin=search&_coverDate=30-Jun-2001&view=c&wchp=dGLbVlB-zSkzV&md5=1462545766370bd354ea0f91beaf5f3d/1-s2.0-S1053811901918166-main.pdf}, state = {published}, DOI = {10.1016/S1053-8119(01)91816-6}, author = {Schultz J{johannes}; Goodenough OR; Frackowiak R; Frith CD} } @Poster{ 1114, title = {The caricature effect across viewpoint changes in face perception}, year = {2000}, month = {11}, day = {16}, abstract = {The finding that caricatures are recognized more quickly and accurately than veridical faces has been demonstrated only for frontal views of human faces (e.g., Benson & Perrett, 1994). In the present study, we investigated whether there is also a “caricature effect” for three-quarter and profile views. Furthermore, we examined what happens to the caricature advantage when generalizing across view changes. We applied a 3D caricature algorithm to laser-scanned head models. In a sequential matching task, we systematically varied the view of the target faces (left/right profile, left/right three-quarter, full-face), the view of the test faces (left/right profile, left/right three-quarter, full-face) and the face type (anticaricature, veridical, caricature). The caricature effect was replicated for frontal views. We also found a clear caricature advantage for three-quarter and profile views. When generalizing across views, the caricature advantage was present for the majority of view change conditions. In a few conditions, there was an anticaricature advantage.}, web_url = {http://www.opam.net/opam2000/OPAM_2000_Pro.pdf}, event_name = {8th Annual Workshop on Object Perception and Memory (OPAM 2000)}, event_place = {New Orleans, LA, USA}, state = {published}, author = {Cheng CY{yicheng}; Knappmeyer B{babsy}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ 109, title = {Investigating categorical perception of gender with 3-D morphs of familiar faces}, journal = {Perception}, year = {2000}, month = {8}, volume = {29}, number = {ECVP Abstract Supplement}, pages = {57}, abstract = {We could find no evidence for categorical perception of face gender using unfamiliar human faces (I Bülthoff et al, 1998 Perception 27 Supplement, 127a). Therefore we have investigated whether familiarising participants with the stimuli prior to testing might favour categorical perception. We created artificial gender continua using 3-D morphs between laser-scanned heads. The observers had to classify all faces according to their gender in a classification task. If perception of face gender is categorical, we would expect participants to classify the morphs into two distinct gender categories. Furthermore, they should differentiate pairs of morphs that straddle the gender boundary more accurately than other pairs in a discrimination task. The participants were familiarised before testing with half of the faces used for creating the morphs. They could categorise most familiar and unfamiliar faces into distinctive gender categories. Thus, they could extract the gender information and use it to classify the images. On the other hand, we found no evidence of increased discriminability for the morph pairs that straddle the gender boundary. 
Apparently, observers did not perceive the gender of a face categorically, even when these faces were familiar to them.}, file_url = {/fileadmin/user_upload/files/publications/pdf109.pdf}, web_url = {http://pec.sagepub.com/content/29/1_suppl/1.full.pdf+html}, event_name = {23rd European Conference on Visual Perception (ECVP 2000)}, event_place = {Groningen, Netherlands}, state = {published}, DOI = {10.1177/03010066000290S101}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell FN{fiona}{Department Human Perception, Cognition and Action}} } @Poster{ 110, title = {There is no categorical effect for the discrimination of face gender using 3D-morphs of laser scans of heads}, journal = {Investigative Ophthalmology & Visual Science}, year = {2000}, month = {5}, volume = {41}, number = {4}, pages = {S225}, file_url = {/fileadmin/user_upload/files/publications/pdf110.pdf}, event_name = {Annual Meeting of the Association for Research in Vision and Ophthalmology (ARVO 2000)}, event_place = {Fort Lauderdale, FL, USA}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell FN{fiona}{Department Human Perception, Cognition and Action}} } @Poster{ 287, title = {Geschlechtswahrnehmung von Gesichtern, die durch 3D-Morph-Verfahren erzeugt wurden}, year = {1999}, month = {2}, pages = {52}, abstract = {Does the determination of the gender of faces show the characteristic features of categorical perception? Using an automated 3D morphing procedure, blended faces were synthesized from 3D laser scans of male and female heads. The morphing procedure allows both the texture and the shape of a face to be altered, so that pigmentation and shape can be adjusted continuously between male and female faces. Other gender-specific features such as hairstyle, beard, make-up, or jewelry were omitted or removed by computer graphics. All faces were presented in frontal or side view (3/4 view) with a neutral expression. Participants first performed a discrimination task (XAB test), after which the subjective gender boundary along the morph continuum was determined in a categorization task. All participants showed the typical step function in the categorization task. In the XAB test, however, it was no easier for participants to discriminate a pair of faces separated by the putative categorical gender boundary than pairs of faces at the more female or more male ends of the morph continuum. Our experiments show that the gender of a face is not perceived categorically.}, file_url = {/fileadmin/user_upload/files/publications/pdf287.pdf}, web_url = {http://www.twk.tuebingen.mpg.de/twk99/}, event_name = {2. 
Tübinger Wahrnehmungskonferenz (TWK 99)}, event_place = {Tübingen, Germany}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell FN{fiona}{Department Human Perception, Cognition and Action}; Vetter T{vetter}{Department Human Perception, Cognition and Action}} } @Article{ 184, title = {Effects of parametric manipulation of inter-stimulus similarity on 3D object categorization}, journal = {Spatial Vision}, year = {1999}, month = {1}, volume = {12}, number = {1}, pages = {107-123}, abstract = {To explore the nature of the representation space of 3D objects, we studied human performance in forced-choice categorization of objects composed of four geon-like parts emanating from a common center. Two categories were defined by prototypical objects, distinguished by qualitative properties of their parts (bulging vs waist-like limbs). Subjects were trained to discriminate between the two prototypes (shown briefly, from a number of viewpoints, in stereo) in a 1-interval forced-choice task, until they reached a 90% correct-response performance level. After training, in the first experiment, 11 subjects were tested on shapes obtained by varying the prototypical parameters both orthogonally (ORTHO) and in parallel (PARA) to the line connecting the prototypes in the parameter space. For the eight subjects who performed above chance, the error rate increased with the ORTHO parameter-space displacement between the stimulus and the corresponding prototype; the effect of the PARA displacement was weaker. Thus, the parameter-space location of the stimuli mattered more than the qualitative contrasts, which were always present. To find out whether both prototypes or just the nearest one to the test shape influenced the decision, in the second experiment we varied the similarity between the categories. Specifically, in the test stage trials the distance between the two prototypes could assume one of three values (FAR, INTERMEDIATE, and NEAR). For the 13 subjects who performed above chance, the error rate (on physically identical stimuli) in the NEAR condition was higher than in the other two conditions. The results of the two experiments contradict the prediction of theories that postulate exclusive reliance on qualitative contrasts, and support the notion of a representation space in which distances to more than one reference point or prototype are encoded (Edelman, 1998).}, file_url = {/fileadmin/user_upload/files/publications/pdf184.pdf}, web_url = {http://www.ingentaconnect.com/search/download?pub=infobike%3a%2f%2fvsp%2fspv%2f1999%2f00000012%2f00000001%2fart00006&mimetype=application%2fpdf&exitTargetId=1309268380820}, state = {published}, DOI = {10.1163/156856899X00067}, author = {Edelman S{edelman}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ 3194, title = {Mutational analysis of the yeast DED1 protein}, journal = {Translation and Stability of mRNA Meeting at the Palo Alto Institute of Molecular Medicine}, year = {1999}, volume = {1999}, state = {published}, author = {Schultz J{johannes}; Boeck R; Linder P} } @Inproceedings{ 251, title = {Prime-orientation dependence in detection of camouflaged biological motion}, year = {1998}, month = {8}, pages = {314-319}, editor = {Grondin, S. , Y. 
Lacouture}, publisher = {International Society for Psychology}, address = {Quebec, Canada}, booktitle = {Fechner Day 98}, event_name = {14th Annual Meeting of the International Society for Psychophysics}, event_place = {Quebec, Canada}, state = {published}, author = {Pavlova MA; Sokolov AN; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ BulthoffNVB1998, title = {Gender perception of 3-D head laser scans}, journal = {Perception}, year = {1998}, month = {8}, volume = {27}, number = {ECVP Abstract Supplement}, pages = {127}, abstract = {We investigated whether the judgment of face gender shows the typical characteristics of categorical perception. As stimuli we used images of morphs created between pairs of male/female 3-D head laser scans. In experiment 1, texture and shape were morphed between both faces. In experiment 2, either the average texture of all faces was mapped onto the shape continuum between the two faces or we mapped the texture continuum between each face pair onto an average shape face. Thus, either the shape or the texture remained constant in any one condition. The subjects viewed these morphs first in a discrimination task (XAB) and then in a categorisation task which was used to locate the subjective gender boundary between each male/female face pair. Although we found that subjects could categorise the face images by their gender in the categorisation task and that texture alone is a better gender indicator than shape alone, the subjects did not discriminate more easily between face images situated at the category boundary in any of our discrimination experiments. We argue that we do not perceive the gender of a face categorically and that more cues are needed to decide the gender of a person than those provided by the faces only.}, web_url = {http://pec.sagepub.com/content/27/1_suppl.toc}, event_name = {21st European Conference on Visual Perception}, event_place = {Oxford, UK}, state = {published}, DOI = {10.1177/03010066980270S101}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell FN{fiona}{Department Human Perception, Cognition and Action}; Vetter T{vetter}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 1065, title = {Perception of a camouflaged point-light walker: a differential priming effect}, journal = {Perception}, year = {1998}, month = {8}, volume = {27}, number = {ECVP Abstract Supplement}, pages = {123}, abstract = {Recently, we showed that recovery of a priori known structure from biological motion leveled off with changing display orientation (eg Pavlova and Sokolov, 1997 Perception 26 Supplement, 92). How does image-plane rotation of a prime affect detection of a camouflaged point-light walker? At each of five randomly presented display orientations between upright and inverted (0°, 45°, 90°, 135°, and 180°), viewers saw a sequence of displays (each display for 1 s). Half of them comprised a camouflaged point-light walker, and half a 'scrambled-walker' mask. In a confidence-rating procedure, observers judged whether a walker was present. Prior to each experimental sequence, they were primed (for 10 s) either with an upright-, 45°-, 90°-, or 180°-oriented sample of the walker. Pronounced priming effects were found only with an upright-oriented prime: it improved detectability for the same-oriented displays, and to a lesser extent for 45°. With 45°-prime, sensitivity for 0°-, 45°-, and 90°-oriented displays was higher than for 135° and 180°. However, with 90°- and 180°-primes ROC curves for all orientations were situated close to one another. These findings indicate that the priming effect in biological motion is partly independent of the relative orientation of priming and primed displays. Moreover, it occurs only if a prime corresponds to a limited range of deviations from upright orientation within which display is spontaneously recognisable despite a discrepancy between event kinematics and dynamics (Pavlova, 1996 Perception 25 Supplement, 6). 
The primacy of dynamic constraints in the perception of structure from biological motion is discussed.}, web_url = {http://pec.sagepub.com/content/27/1_suppl.toc}, event_name = {21st European Conference on Visual Perception}, event_place = {Oxford, UK}, state = {published}, DOI = {10.1177/03010066980270S101}, author = {Pavlova MA; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Sokolov AN} } @Article{ 148, title = {Top-down influences on stereoscopic depth-perception}, journal = {Nature Neuroscience}, year = {1998}, month = {7}, volume = {1}, number = {3}, pages = {254-257}, abstract = {The interaction between depth perception and object recognition has important implications for the nature of mental object representations and models of hierarchical organization of visual processing. It is often believed that the computation of depth influences subsequent high-level object recognition processes, and that depth processing is an early vision task that is largely immune to 'top-down' object-specific influences, such as object recognition. Here we present experimental evidence that challenges both these assumptions in the specific context of stereoscopic depth-perception. We have found that observers' recognition of familiar dynamic three- dimensional (3D) objects is unaffected even when the objects' depth structure is scrambled, as long as their two-dimensional (2D) projections are unchanged. Furthermore, the observers seem perceptually unaware of the depth anomalies introduced by scrambling. We attribute the latter result to a top-down recognition-based influence whereby expectations about a familiar object's 3D structure override the true stereoscopic information.}, file_url = {/fileadmin/user_upload/files/publications/pdf148.pdf}, web_url = {http://www.nature.com/neuro/journal/v1/n3/pdf/nn0798_254.pdf}, state = {published}, DOI = {10.1038/699}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Sinha P{pawan}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 344, title = {Recovery of a priori known structure from biological motion}, year = {1998}, month = {7}, pages = {64-68}, editor = {B. Bril, A. Ledebt, G. Dietrich , A. 
Roby-Brami}, publisher = {Editions EDK}, address = {Paris, France}, booktitle = {Advances in Perception-Action Coupling}, event_name = {Fifth European Workshop on Ecological Psychology (EWEP 5)}, event_place = {Pont-à-Mousson, France}, state = {published}, ISBN = {2842540123}, author = {Pavlova MA; Sokolov AN; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ 1125, title = {Effects of shape and texture on the perceptual categorization of gender in faces}, journal = {Investigative Ophthalmology & Visual Science}, year = {1998}, month = {5}, volume = {39}, number = {4}, pages = {173}, event_name = {Annual Meeting of the Association for Research in Vision and Ophthalmology (ARVO 1998)}, event_place = {Fort Lauderdale, FL, USA}, state = {published}, author = {Newell FN{fiona}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Vetter T{vetter}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 1124, title = {Is the gender of a face categorically perceived?}, journal = {Investigative Ophthalmology & Visual Science}, year = {1998}, month = {5}, volume = {39}, number = {4}, pages = {171}, event_name = {Annual Meeting of the Association for Research in Vision and Ophthalmology (ARVO 1998)}, event_place = {Fort Lauderdale, FL, USA}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Newell FN{fiona}{Department Human Perception, Cognition and Action}; Vetter T{vetter}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 283, title = {Masking a point-light walker}, year = {1998}, month = {2}, pages = {120}, abstract = {In spite of the potential perceptual ambiguity of a point-light walking figure, observers can readily recover the invariant structure from biological motion when the display is upright. However, even though the low-level relations between moving dots are the same for upright and inverted orientations, perception of a point-light walker is dramatically impeded by 180°-display inversion. Spontaneous recognition was found to improve abruptly with changing display orientation from inverted to upright (Pavlova, 1996, Perception 25, Suppl.). This evidence implies that the visual system implements additional processing constraints for the unambiguous interpretation of biological motion. We used a masking paradigm to study the processing constraints in biological motion perception. At each of five randomly presented orientations (0°, 45°, 90°, 135°, and 180°), viewers saw a sequence of 210 displays. Half of them comprised a canonical 11-dot point-light walker, and half a partly distorted walker, in which rigid pair-wise connections between moving dots were perturbed. A 66-dot “scrambled-walker” mask camouflaged both figures. Prior to each experimental sequence, a sample of a canonical walker in the respective orientation was demonstrated. Observers judged whether a canonical figure was present. A jackknife estimate of the ROC parameters indicated that detectability leveled off with changing orientation from upright to 135°, and then slightly increased towards display inversion. However, even at 135° and 180° it was above chance. For orientations 0°, 45° and 90°, perceptual learning to detect a canonical walker proceeded rather rapidly in the course of the experiment. 
Comparison with the data on spontaneous recognition of biological motion suggests that display orientation affects bottom-up processing of biological motion more strongly than top-down processing. We suppose that some processing constraints (such as axis-of-symmetry and dynamic constraints) in the perception of biological motion may be hierarchically nested. Dynamic constraints appear to be the most powerful: the highest detectability was found with upright orientation. As these constraints lose their strength with changing orientation, other processing constraints become more influential. For instance, the lower sensitivity for 135° as compared to 180° might be accounted for by the axis-of-symmetry constraint that is implemented by the visual system at 180°. Likewise, due to the inefficiency of this constraint, the biological motion pattern is perceived as more multistable at 90°-150° than at 180° display orientation.}, web_url = {http://www.twk.tuebingen.mpg.de/twk98/}, event_name = {1. Tübinger Wahrnehmungskonferenz (TWK 98)}, event_place = {Tübingen, Germany}, state = {published}, author = {Pavlova MA; Sokolov AN; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Techreport{ 1512, title = {View-based representations for dynamic 3D object recognition}, year = {1997}, month = {2}, number = {47}, abstract = {Much of the experimental and computational modeling research on human recognition processes has focused exclusively on the domain of static three-dimensional (3D) objects. The issue of the nature of internal representations underlying dynamic 3D object recognition is largely unexplored. Here we examine this issue, with emphasis on view-point dependency, using variants of biological motion sequences of the kind described by Johansson (1973). Our first experiment investigated whether observers exhibit the well-known canonical view-point effect while recognizing 3D biological motion sequences. Results showing a markedly impaired recognition performance with sequences recorded from unusual view-points provide preliminary evidence for the role of view-point familiarity and the inability of the visual system to extract view-independent representations. Next, to examine whether the motion traces used for recognition preserve 3D information, or are largely 2D, we developed a special class of biological motion sequences. The distinguishing characteristic of these sequences was that while they preserve the `normal' 2D projections from one view-point, their 3D structures were randomized. View-points preserving the `normal' 2D projections yielded vivid biological motion percepts, whereas other viewpoints yielded percepts of randomly moving dots. In the final set of experiments we examined whether this result could be an outcome of a recognition-dependent top-down suppression of anomalies in 3D structures. Our results indicate that subjects' expectations about 3D structure can suppress the bottom-up depth information provided by binocular stereo. 
Taken together, these findings suggest that biological motion sequences are represented by the human visual system as 2D traces rather than as 3D structural descriptions, and that the perception of 3D structure may be based not only upon low-level processes but also upon recognition-dependent top-down influences.}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Sinha P{pawan}{Department Human Perception, Cognition and Action}} } @Techreport{ 1504, title = {Features of the representation space for 3D objects}, year = {1996}, month = {9}, number = {40}, abstract = {To explore the nature of the representation space of 3D objects, we studied human performance in forced-choice classification of objects composed of four geon-like parts, emanating from a common center. The two class prototypes were distinguished by qualitative contrasts (bulging vs.\ waist-like limbs). Subjects were trained to discriminate between the two prototypes (shown briefly, from a number of viewpoints, in stereo) in a 1-interval forced-choice task, until they reached a 90% correct-response performance level. In the first experiment, 11 subjects were tested on shapes obtained by varying the prototypical parameters both orthogonally (Ortho) and in parallel (Para) to the line connecting the prototypes in the parameter space. For the eight subjects who performed above chance, the error rate increased with the Ortho parameter-space displacement between the stimulus and the corresponding prototype (the effect of the Para displacement was marginal). Clearly, the parameter-space location of the stimuli mattered more than the qualitative contrasts (which were always present). To find out whether both prototypes or just the nearest neighbor of the test shape influenced the decision, in the second experiment we tested 18 new subjects on a fixed set of shapes, while the test-stage distance between the two classes assumed one of three values (Far, Intermediate, and Near). For the 13 subjects who performed above chance, the error rate (on physically identical stimuli) in the Near condition was higher than in the other two conditions. The results of the two experiments contradict the prediction of theories that postulate exclusive reliance on qualitative contrasts, and support the notion of a metric representation space, with the subjects' performance determined by distances to more than one reference point or prototype.}, file_url = {/fileadmin/user_upload/files/publications/pdf1504.pdf}, state = {published}, author = {Edelman S{edelman}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Conference{ 1121, title = {Features of the representation space for 3D objects}, journal = {Perception}, year = {1996}, month = {9}, volume = {25}, number = {ECVP Abstract Supplement}, pages = {49-50}, abstract = {To explore the nature of the representation space of 3-D objects, we studied human performance in forced-choice classification of objects composed of four geon-like parts, emanating from a common centre. The two class prototypes were distinguished by qualitative contrasts (cross-section shape; bulge/waist), and by metric parameters (degree of bulge/waist, taper ratio). 
Subjects were trained to discriminate between the two prototypes (shown briefly, from a number of viewpoints, in stereo) in a 1-interval forced-choice task, until they reached a 90% correct-response performance level. In experiment 1, eleven subjects were tested on shapes obtained by varying the prototypical parameters both orthogonally (Ortho), and in parallel (Para) to the line connecting the prototypes in the parameter space. For the eight subjects who performed above chance, the error rate increased with the Ortho parameter-space displacement between the stimulus and the corresponding prototype: F(1,68) = 3.6, p < 0.06 (the effect of the Para displacement was marginal). Clearly, the parameter-space location of the stimuli mattered more than the qualitative contrasts (which were always present). To find out whether both prototypes or just the nearest neighbour of the test shape influenced the decision, in experiment 2 eight new subjects were tested on a fixed set of shapes, while the test-stage distance between the two classes assumed one of three values (Far, Intermediate, or Near). For the six subjects who performed above chance, the error rate (on physically identical stimuli) in the Near condition was higher than in the other two conditions: F(1,89) = 3.7, p < 0.06. The results of the two experiments contradict the prediction of theories that postulate exclusive reliance on qualitative contrasts, and support the notion of a metric representation space with the subjects' performance determined by distances to more than one reference point or prototype (cf. Edelman, 1995, Minds and Machines, 5, 45-68).}, web_url = {http://pec.sagepub.com/content/25/1_suppl.toc}, event_name = {19th European Conference on Visual Perception}, event_place = {Strasbourg, France}, state = {published}, author = {B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Edelman S{edelman}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ 568, title = {Interdependence of feature dimensions in the representation of 3D objects}, journal = {Investigative Ophthalmology & Visual Science}, year = {1996}, month = {4}, volume = {37}, number = {3}, pages = {S1125}, abstract = {Purpose. The dimensions of the representation space of 3D objects may be independent, if nonaccidental (generic or qualitative) shape contrasts serve as the distinguishing features. Alternatively, the dimensions can be interdependent, as predicted by some theories that postulate metric feature-space representations. To explore this issue, we studied human performance in forced-choice classification of objects composed of 4 geon-like parts, emanating from a common center. Methods. The two class prototypes were distinguished by qualitative contrasts (cross-section shape; bulge/waist), and by metric parameters (degree of bulge/waist, taper ratio). Subjects were trained to discriminate between the two prototypes (shown briefly, from a number of viewpoints, in stereo) in a 1-interval forced-choice task, until they reached a 90% correct-response performance level. Subsequent trials involved both original and modified versions of the prototypes; the latter were obtained by varying the metric parameters both orthogonally (ORTHO) and in parallel (PARA) to the line connecting the prototypes in the parameter space. Results. 8 out of 11 subjects succeeded in learning the task within the allotted time.
For these subjects, the error rates increased progressively with the parameter-space displacement between the stimulus and the corresponding prototype. The effect of ORTHO displacement was significant: F(1, 68) = 3.6, p < 0.06. There was also a hint of a marginal PARA displacement effect: F(1, 68) = 1.9, p = 0.17. Conclusions. Theories that postulate exclusive reliance on qualitative contrasts (such as Biederman's Recognition By Components) predict near-perfect discrimination performance for stimuli derived from the prototypes both by PARA and by ORTHO parameter-space displacement. Our results contradict this prediction, and support the notion of a metric representation space, in which any displacement away from the familiar region incurs performance costs.}, event_name = {Annual Meeting of the Association for Research in Vision and Ophthalmology}, event_place = {Fort Lauderdale, FL, USA}, state = {published}, author = {Edelman S{edelman}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ 566, title = {Top-down influence of recognition on stereoscopic depth perception}, journal = {Investigative Ophthalmology & Visual Science}, year = {1996}, month = {4}, volume = {37}, number = {3}, pages = {1125}, abstract = {Purpose. Last year we demonstrated that the recognition of biological motion sequences is consistent with a view-based recognition framework. We found that anomalies in the depth structure of 3D objects had an intriguing lack of influence on subject ratings of their figural goodness. In the present work, we attempt to explain this result by showing a strong top-down influence from high-level vision (object recognition) on early vision (stereoscopic depth perception). Methods. We used biological motion sequences of the kind first described by Johansson (Percep. & Psychophysics, 14, 201-211, 1973) to study the perception of the 3D structure of human-like versus randomly moving dots displayed in stereo. The depth structure of the human sequence was altered by adding controlled amounts of depth noise (which left the 2D projections largely unchanged). "Random" sequences were created by adding x-y positional noise to the "Human" sequences. In a 2AFC task, participants had to decide whether 3 randomly chosen dots from a stereoscopically displayed dot motion sequence appeared at the same distance from the observer. Results. Subject performance was significantly (p < 0.005) better with "random" sequences than with "human" ones. In a human sequence, triples drawn from the same limb were often perceived as being in one depth plane irrespective of their actual "distorted" 3D configuration. Conclusions. These results indicate the existence of top-down object-specific influences that suppress the perception of deviations from the expected 3D structure in a motion sequence.
The absence of such an influence for novel structures might account for subjects' better performance with the random sequences.}, event_name = {Annual Meeting of the Association for Research in Vision and Ophthalmology (ARVO 1996)}, event_place = {Fort Lauderdale, FL, USA}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Sinha P{pawan}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ 576, title = {Illusory motion from shadows}, journal = {Nature}, year = {1996}, month = {1}, volume = {379}, number = {6560}, pages = {31}, web_url = {http://www.nature.com/nature/journal/v379/n6560/pdf/379031a0.pdf}, state = {published}, DOI = {10.1038/379031a0}, author = {Kersten D{kersten}{Department Human Perception, Cognition and Action}; Knill DC; Mamassian P{pascal}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ 1120, title = {Recognizing biological motion sequences}, journal = {Perception}, year = {1995}, month = {8}, volume = {24}, number = {ECVP Abstract Supplement}, pages = {112}, event_name = {18th European Conference on Visual Perception}, event_place = {Tübingen, Germany}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Sinha P{pawan}{Department Human Perception, Cognition and Action}} } @Poster{ 1122, title = {View-based representations for biological motion sequences}, journal = {Investigative Ophthalmology & Visual Science}, year = {1995}, month = {5}, volume = {36}, number = {4}, pages = {S417}, event_name = {Annual Meeting of the Association for Research in Vision and Ophthalmology (ARVO 1995)}, event_place = {Fort Lauderdale, FL, USA}, state = {published}, author = {Sinha P{pawan}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ 680, title = {General lighting can overcome accidental viewing}, journal = {Investigative Ophthalmology & Visual Science}, year = {1994}, month = {5}, volume = {35}, number = {4}, pages = {1741}, event_name = {Annual Meeting of the Association for Research in Vision and Ophthalmology (ARVO 1994)}, event_place = {Sarasota, FL, USA}, state = {published}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Kersten D{kersten}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 3195, title = {Distribution and morphology of nitric oxide-positive neurons in the cerebral cortex during pre- and postnatal development}, journal = {Annual Meeting of the Swiss Societies for Experimental Biology (USGEB)}, year = {1994}, month = {3}, volume = {1994}, state = {published}, author = {Schultz J{johannes}; Hornung J-P} } @Article{ 833, title = {GABA-antagonist inverts movement and object detection in flies}, journal = {Brain Research}, year = {1987}, month = {3}, volume = {407}, number = {1}, pages = {152-158}, abstract = {Movement detection is one of the most elementary visual computations performed by vertebrates as well as invertebrates. However, comparatively little is known about the biophysical mechanisms underlying this computation.
It has been proposed on both physiological [1, 8, 21] and theoretical [2, 15, 23] grounds that inhibition plays a crucial role in the directional selectivity of elementary movement detectors (EMDs). For the first time, we have studied electrophysiological and behavioral changes induced in flies after application of picrotoxinin, an antagonist of GABA. The results show that inhibitory interactions play an important role in movement detection in flies. Furthermore, our behavioral results suggest that the computation of object position is based primarily on movement detection.}, file_url = {/fileadmin/user_upload/files/publications/pdf833.pdf}, web_url = {http://www.sciencedirect.com/science/article/pii/0006899387912303}, state = {published}, DOI = {10.1016/0006-8993(87)91230-3}, author = {B\"ulthoff HH{hhb}; B\"ulthoff I{isa}} } @Article{ 823, title = {Combining Neuropharmacology and Behavior to Study Motion Detection in Flies}, journal = {Biological Cybernetics}, year = {1987}, month = {2}, volume = {55}, number = {5}, pages = {313-320}, abstract = {The optomotor following response, a behavior based on movement detection, was recorded in the fruitfly Drosophila melanogaster before and after the injection of picrotoxinin, an antagonist of the inhibitory neurotransmitter GABA. The directional selectivity of this response was transiently abolished or inverted after injection. This result is in agreement with picrotoxinin-induced modifications observed in the electrophysiological activity of direction-selective cells in flies (Bülthoff and Schmid 1983; Schmid and Bülthoff, in preparation). Furthermore, walking and flying flies treated with picrotoxinin followed motion from back to front more actively, instead of from front to back as in normal animals. Since the difference in the responses to front-to-back and back-to-front motion is proposed to be the basis of fixation behavior in flies (Reichardt 1973), our results support this notion and are inconsistent with schemes explaining fixation by alternative mechanisms.}, file_url = {/fileadmin/user_upload/files/publications/combining_neuropharmacology_and_behavior_to_study_motion_detection_in_flies_823[0].pdf}, web_url = {http://www.springerlink.com/content/y056t6h64564n574/fulltext.pdf}, state = {published}, DOI = {10.1007/BF02281977}, author = {B\"ulthoff HH{hhb}; B\"ulthoff I{isa}} } @Article{ 1128, title = {Deoxyglucose mapping of nervous activity induced in Drosophila brain by visual movement. 3. Outer rhabdomeres absent JK84, small optic lobes KS58 and no object fixation EB12, visual mutants.}, journal = {Journal of Comparative Physiology}, year = {1986}, month = {3}, volume = {158}, number = {2}, pages = {195-202}, abstract = {Autoradiographs of the brains of the visual mutants outer rhabdomeres absent JK84 (ora), small optic lobes KS58 (KS58) and no object fixation EB12 (B12) have been obtained by the deoxyglucose method. The patterns of metabolic activity in the optic lobes of the visually stimulated mutants are compared with that of similarly stimulated wildtype (WT) flies, which was described in Part I of this work (Buchner et al. 1984b). In the mutant KS58 the optomotor following response to movement is nearly normal despite a 40–45% reduction of volume in the visual neuropils, medulla and lobula complex. In B12 flies the volume of these neuropils and the optomotor response are reduced. In autoradiographs of both mutants the pattern of neuronal activity induced by stimulation with moving gratings does not differ substantially from that in the WT.
This suggests that only neurons irrelevant to movement detection are affected by the mutation. However, in the lobula plate of some KS58 flies and in the second chiasma of all B12 flies, the pattern of metabolic activity differs from that observed in WT flies. Up to now, no causal relation has been found between the modifications described in behaviour or anatomy and those observed in the labelling of these mutants. In the ommatidia of ora flies the outer rhabdomeres are lacking while the central photoreceptors appear to be normal. Stimulus-specific labelling is absent in the visual neuropil of these mutants stimulated with movement or flicker. This result underlines the importance of the outer rhabdomeres for visual tasks, especially for movement detection.}, web_url = {http://link.springer.com/content/pdf/10.1007%2FBF01338562.pdf}, state = {published}, DOI = {10.1007/BF01338562}, author = {B\"ulthoff I{isa}} } @Article{ 1129, title = {Freeze-substitution of Drosophila heads for subsequent 3H-2-deoxyglucose autoradiography}, journal = {Journal of Neuroscience Methods}, year = {1985}, month = {5}, volume = {13}, number = {3-4}, pages = {183-190}, abstract = {High resolution of [3H]2-deoxyglucose labelling was obtained in autoradiographs of Drosophila brains after freeze-substitution in anhydrous acetone at −76°C. This method was applied to preparations which received visual, olfactory and mechanosensory stimulation. The autoradiographs were compared to those obtained after freeze-drying. Freeze-substitution, which has proved to be technically simple, rapid and inexpensive, yields a good quality of tissue preservation and hence is recommended for tissue dehydration prior to autoradiography.}, web_url = {http://www.sciencedirect.com/science/article/pii/0165027085900664}, state = {published}, DOI = {10.1016/0165-0270(85)90066-4}, author = {Rodrigues V; B\"ulthoff I{isa}} } @Poster{ 837, title = {Pharmacological inversion of directional specificity in movement detectors}, journal = {Investigative Ophthalmology & Visual Science}, year = {1985}, month = {5}, volume = {26}, number = {3}, pages = {56}, event_name = {Annual Spring Meeting of the Association for Research in Vision and Ophthalmology (ARVO 1985)}, event_place = {Sarasota, FL, USA}, state = {published}, author = {B\"ulthoff HH{hhb}; B\"ulthoff I{isa}} } @Poster{ 844, title = {Umkehrung der Bewegungs- und Objektwahrnehmung durch einen GABA-Antagonisten bei Fliegen}, journal = {Verhandlungen der Deutschen Zoologischen Gesellschaft 1985}, year = {1985}, month = {5}, pages = {223}, file_url = {/fileadmin/user_upload/files/publications/umkehrung_der_bewegungs_und_objektwahrnehmung_durch_einen_gaba_antagonisten_bei_fliegen_844[0].pdf}, event_name = {78. Jahresversammlung der Deutschen Zoologischen Gesellschaft}, event_place = {Wien, Austria}, state = {published}, author = {B\"ulthoff HH{hhb}; B\"ulthoff I{isa}} } @Article{ 1127, title = {Deoxyglucose mapping of nervous activity induced in Drosophila brain by visual movement. 2. Optomotor blind H31 and lobula plate-less N684 visual mutants.}, journal = {Journal of Comparative Physiology}, year = {1985}, month = {1}, volume = {156}, number = {1}, pages = {25-34}, abstract = {The pattern of visually induced local metabolic activity in the optic lobes of two structural mutants of Drosophila melanogaster is compared with the corresponding wildtype pattern which has been reported in Part I of this work (Buchner et al. 1984b).
Individual optomotor-blind H31 (omb) flies lacking normal giant HS-neurons were tested behaviourally, and those with strongly reduced responses to visual movement were processed for 3H-deoxyglucose autoradiography. The distribution of metabolic activity in the optic lobes of omb apparently does not differ substantially from that found in wildtype. In the mutant lobula plate-less N684 (lop) the small rudiment of the lobula plate which lacks many small-field input neurons does not show any stimulus-specific labelling. The data provide further support for the hypothesis that small-field input neurons to the lobula plate are the cellular substrate of the direction-specific labelling in Drosophila (see Buchner et al. 1984b).}, web_url = {http://link.springer.com/content/pdf/10.1007%2FBF00610663.pdf}, state = {published}, DOI = {10.1007/BF00610663}, author = {B\"ulthoff I{isa}; Buchner E} } @Article{ 1126, title = {Deoxyglucose mapping of nervous activity induced in Drosophila brain by visual movement. 1. Wildtype}, journal = {Journal of Comparative Physiology}, year = {1984}, month = {7}, volume = {155}, number = {4}, pages = {471-483}, abstract = {Local metabolic activity was mapped in the brain of Drosophila by the radioactive deoxyglucose technique. The distribution of label in serial autoradiographs allows us to draw the following conclusions concerning neuronal processing of visual movement information in the brain of Drosophila. 1. The visual stimuli used (homogeneous flicker, moving gratings, reversing contrast gratings) cause only a small increase in metabolic activity in the first visual neuropil (lamina). 2. In the second visual neuropil (medulla) at least four layers respond to visual movement and reversing contrast gratings by increased metabolic activity; homogeneous flicker is less effective. 3. With the current autoradiographic resolution (2-3 μm) no directional selectivity can be detected in the medulla. 4. In the lobula, the anterior neuromere of the third visual neuropil, movement-specific activity is observed in three layers, two of which are more strongly labelled by ipsilateral front-to-back than by back-to-front movement. 5. In its posterior counterpart, the lobula plate, four movement-sensitive layers can be identified in which label accumulation specifically depends on the direction of the movement: Ipsilateral front-to-back movement labels a superficial anterior layer, back-to-front movement labels an inner anterior layer, upward movement labels an inner posterior layer and downward movement labels a superficial posterior layer. 6. A considerable portion of the stimulus-enhanced labelling of medulla and lobula complex is restricted to those columns which connect to the stimulated ommatidia. This retinotopic distribution of label suggests the involvement of movement-sensitive small-field neurons. 7. Certain axonal profiles connecting the lobula plate and the lateral posterior protocerebrum are labelled by ipsilateral front-to-back movement. Presumably different structures in the same region are labelled by ipsilateral downward movement. Conspicuously labelled foci and commissures in the central brain cannot yet be associated with a particular stimulus.
The results are discussed in the light of present anatomical and physiological knowledge of the visual movement detection system of flies.}, web_url = {http://link.springer.com/content/pdf/10.1007%2FBF00611912.pdf}, state = {published}, DOI = {10.1007/BF00611912}, author = {Buchner E; Buchner S; B\"ulthoff I{isa}} } @Poster{ 840, title = {Beeinflussung der Bewegungsdetektion durch Neuropharmaka}, journal = {Verhandlungen der Deutschen Zoologischen Gesellschaft 1984}, year = {1984}, month = {6}, pages = {276}, file_url = {/fileadmin/user_upload/files/publications/beeinflussung_der_bewegungsdetektion_durch_neuropharmaka_840[0].pdf}, event_name = {77. Jahresversammlung der Deutschen Zoologischen Gesellschaft}, event_place = {Giessen, Germany}, state = {published}, author = {B\"ulthoff HH{hhb}; B\"ulthoff I{isa}; Schmid A} }