@Proceedings{ ChuangGKS2017, title = {Ambient Notification Environments}, year = {2017}, month = {4}, pages = {-}, web_url = {https://www.dagstuhl.de/de/programm/kalender/semhp/?semnr=17161}, publisher = {Leibniz-Zentrum für Informatik}, address = {Schloss Dagstuhl, Germany}, series = {Dagstuhl Reports}, event_name = {Dagstuhl Seminar 17161}, event_place = {Schloss Dagstuhl, Germany}, state = {accepted}, ISBN = {-}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Gehring S; Kay J; Schmidt A} } @Conference{ Chuang2015_3, title = {Beyond Steering in Human-Centered Closed-Loop Control}, year = {2015}, month = {11}, day = {5}, abstract = {Machines provide us with the capacity to achieve goals beyond our physical limitations. For example, automobiles and aircraft extend our physical mobility, allowing us to travel vast distances in far less time than would otherwise be possible. It is truly remarkable that our natural perceptual and motor capabilities are able to adapt, with sufficient training, to the unnatural demands posed by vehicle handling. While much progress has been achieved in formalizing the control relationship between the human operator and the controlled vehicle, considerably less is understood with regard to how human cognition influences this control relationship. Such an understanding is particularly important given the prevalence of autonomous vehicular control, which stands to radically modify the responsibility of the human operator from one of control to supervision. In this talk, I will first explain how the limitations of a classical cybernetics approach can reveal the necessity of understanding high-level cognition during control, such as anticipation and expertise. Next, I will present our research that relies on unobtrusive measurement techniques (i.e., gaze-tracking, EEG/ERP) to understand how human operators seek out and process relevant information whilst steering. Examples from my lab will be used to demonstrate how such findings can effectively contribute to the development of human-centered technology in the steering domain, such as with the use of warning cues and shared control. Finally, I will briefly present some efforts in modeling an augmented aerial vehicle (e.g., civil helicopters), with the goal of making flying a rotorcraft as easy as driving (www.mycopter.eu).}, web_url = {http://inc.ucsd.edu/inc_chalk.html}, event_name = {Institute for Neural Computation: INC Chalk Talk Series}, event_place = {San Diego, CA, USA}, state = {published}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ ScheerBC2015_3, title = {On the influence of steering on the orienting response}, year = {2015}, month = {10}, pages = {24}, abstract = {The extent to which we experience ‘workload’ whilst steering depends on (i) the availability of the human operator’s (presumably limited) resources and (ii) the demands of the steering task. Typically, an increased demand of the steering task for a specific resource can be inferred from how steering modifies the components of the event-related potential (ERP), which is elicited by the stimuli of a competing task. Recent studies have demonstrated that this approach can continue to be applied even when the stimuli do not require an explicit response. Under certain circumstances, workload levels in the primary task can influence the ERPs that are elicited by task-irrelevant target events, in particular complex environmental sounds. 
Using this approach, the current study assesses the human operator’s resources that are demanded by different aspects of the steering task. To enable future studies to focus their analysis, we identify ERP components and electrodes that are relevant to steering demands, using mass univariate analysis. Additionally, we compare the effectiveness of sound stimuli that are conventionally employed to elicit ERPs for assessing workload, namely pure-tone oddballs and environmental sounds. In the current experiment, participants performed a compensatory tracking task that required them to align a continuously perturbed target line to a stationary reference line. Task difficulty was manipulated either by varying the bandwidth of the disturbance or by varying the complexity of the controller dynamics of the steering system. Both manipulations presented two levels of difficulty (‘Easy’ and ‘Hard’), which could be contrasted to a baseline ‘View only’ condition. During the steering task, task-irrelevant sounds were presented to elicit ERPs: frequent pure-tone standards, rare pure-tone oddballs and rare environmental sounds. Our results show that steering task demands influence ERP components that the previous literature suggests are related to the following cognitive processes: the call for orientation (i.e., early P3a), the orientation of attention (i.e., late P3a), and the semantic processing of the task-irrelevant sound stimuli (i.e., N400). The early P3 was decreased in the frontocentral electrodes, the late P3 centrally and the N400 centrally and over the left hemisphere. Single-subject analyses on these identified components reveal differences that correspond to our manipulations of steering difficulty: more participants showed discriminable differences in these components in the ‘Hard’ relative to the ‘Easy’ condition. The current study identifies the spatial and temporal distribution of ERPs that ought to be targeted for future investigations of the influence of steering on workload. In addition, the use of task-irrelevant environmental sounds to elicit ERP indices for workload holds several advantages over conventional beep tones, especially in the operational context. Finally, the current findings indicate the involvement of cognitive processes in steering, which is typically viewed as being a predominantly visuo-motor task.}, web_url = {http://www.ipa.tu-berlin.de/bwmms/11_berliner_werkstatt_mms/}, editor = {Wienrich, C., T. Zander, K. Gramann}, publisher = {Universitätsverlag der TU Berlin}, address = {Berlin, Germany}, booktitle = {Trends in Neuroergonomics}, event_name = {11. 
Berliner Werkstatt Mensch-Maschine-Systeme}, event_place = {Berlin, Germany}, state = {published}, ISBN = {978-3-7983-2803-7}, DOI = {10.14279/depositonce-4887}, author = {Scheer M{mscheer}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ Chuang2015_2, title = {Non-obtrusive measurements of attention and workload in steering}, year = {2015}, month = {9}, day = {16}, web_url = {http://dsc2015.tuebingen.mpg.de/Program.html}, event_name = {DSC 2015 Europe: Driving Simulation Conference & Exhibition}, event_place = {Tübingen, Germany}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ GlatzBC2015_3, title = {Attention Enhancement During Steering Through Auditory Warning Signals}, year = {2015}, month = {9}, day = {1}, pages = {1-5}, abstract = {Modern cars integrate advanced driving assistance systems that range up to fully automated driving modes. Since fully automated driving has not yet come into everyday practice, operators currently make use of assistance systems: while the driver is still in control of the vehicle, alerts signal possible collision dangers when, for example, parking. Such warnings are necessary because humans have limited resources; a critical event can stay unnoticed simply because attention was focused elsewhere. This raises the question: What is an effective alert in a steering environment? Auditory warning signals have been shown to efficiently direct attention. In the context of traffic, they can prevent collisions by heightening the driver's situational awareness of potential accidents.}, web_url = {http://www.auto-ui.org/15/workshops.php}, event_name = {Workshop on Adaptive Ambient In-Vehicle Displays and Interactions In conjunction with AutomotiveUI 2015 (WAADI'15)}, event_place = {Nottingham, UK}, state = {published}, ISBN = {978-1-4503-3736-6}, author = {Glatz C{cglatz}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ ChuangB2015, title = {Towards a Better Understanding of Gaze Behavior in the Automobile}, year = {2015}, month = {9}, day = {1}, pages = {1-4}, abstract = {Gaze-tracking technology is used increasingly to determine how and which information is accessed and processed in a given interface environment, such as in-vehicle information systems in automobiles. Typically, fixations on regions of interest (e.g., windshield, GPS) are treated as an indication that the underlying information has been attended to and is, thus, vital to the task. Therefore, decisions such as optimal instrument placement are often made on the basis of the distribution of recorded fixations. In this paper, we briefly introduce gaze-tracking methods for in-vehicle monitoring, followed by a discussion on the relationship between gaze and user-attention. We posit that gaze-tracking data can yield stronger insights on the utility of novel regions-of-interest if they are considered in terms of their deviation from basic gaze patterns. In addition, we suggest how EEG recordings could complement gaze-tracking data and raise outstanding challenges in its implementation. 
It is contended that gaze-tracking is a powerful tool for understanding how visual information is processed in a given environment, provided it is understood in the context of a model that first specifies the task that has to be carried out.}, web_url = {http://www.auto-ui.org/15/p/workshops/2/8_Towards%20a%20Better%20Understanding%20of%20Gaze%20Behavior%20in%20the%20Automobile_Chuang.pdf}, event_name = {Workshop on Practical Experiences in Measuring and Modeling Drivers and Driver-Vehicle Interactions In conjunction with AutomotiveUI 2015}, event_place = {Nottingham, UK}, state = {published}, ISBN = {978-1-4503-3736-6}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ LockenBMCSAM2015, title = {Workshop on Adaptive Ambient In-Vehicle Displays and Interactions}, year = {2015}, month = {9}, day = {1}, pages = {1-4}, abstract = {Informing a driver of the vehicle’s changing state and environment is a major challenge that grows with the introduction of automated in-vehicle assistance and infotainment systems. Poorly designed systems could compete for the driver’s attention, drawing it away from the primary driving task. Thus, such systems should communicate information in a way that conveys its relevant urgency. While some information is unimportant and should never distract a driver from important tasks, there are also calls for action, which a driver should not be able to ignore. We believe that adaptive ambient displays and peripheral interaction could serve to unobtrusively present information while switching the driver’s attention when needed. This workshop will focus on promoting an exchange of best-known methods by discussing challenges and potentials for this kind of interaction in today’s scenarios as well as in future mixed or fully autonomous traffic. The central objective of this workshop is to bring together researchers from different domains and discuss innovative and engaging ideas and a future landscape for research in this area.}, web_url = {http://www.auto-ui.org/15/p/workshopproposals/WAADI.pdf}, event_name = {Workshop on Adaptive Ambient In-Vehicle Displays and Interactions In conjunction with AutomotiveUI 2015 (WAADI'15)}, event_place = {Nottingham, UK}, state = {published}, author = {L\"ocken A; Borojeni SS; M\"uller H; Chuang L{chuang}{Department Human Perception, Cognition and Action}; Schroeter R; Alvarez I; Meijering V} } @Inproceedings{ RienerAJCJPC2015, title = {Workshop on Practical Experiences in Measuring and Modeling Drivers and Driver-Vehicle Interactions}, year = {2015}, month = {9}, day = {1}, pages = {1-4}, web_url = {http://www.auto-ui.org/15/workshops.php}, event_name = {Workshop on Practical Experiences in Measuring and Modeling Drivers and Driver-Vehicle Interactions In conjunction with AutomotiveUI 2015}, event_place = {Nottingham, UK}, state = {published}, ISBN = {978-1-4503-3736-6}, author = {Riener A; Alvarez I; Jeon MP; Chuang L{chuang}{Department Human Perception, Cognition and Action}; Ju W; Pfleging B; Chiesa M} } @Article{ BiegCBB2015, title = {Asymmetric saccade reaction times to smooth pursuit}, journal = {Experimental Brain Research}, year = {2015}, month = {9}, volume = {233}, number = {9}, pages = {2527-2538}, abstract = {Before initiating a saccade to a moving target, the brain must take into account the target’s eccentricity as well as its movement direction and speed. 
We tested how the kinematic characteristics of the target influence the time course of this oculomotor response. Participants performed a step-ramp task in which the target object stepped from a central to an eccentric position and moved at constant velocity either to the fixation position (foveopetal) or further to the periphery (foveofugal). The step size and target speed were varied. Of particular interest were trials that exhibited an initial saccade prior to a smooth pursuit eye movement. Measured saccade reaction times were longer in the foveopetal than in the foveofugal condition. In the foveopetal (but not the foveofugal) condition, the occurrence of an initial saccade, its reaction time as well as the strength of the pre-saccadic pursuit response depended on both the target’s speed and the step size. A common explanation for these results may be found in the neural mechanisms that select between oculomotor response alternatives, i.e., a saccadic or smooth response.}, web_url = {http://link.springer.com/content/pdf/10.1007%2Fs00221-015-4323-8.pdf}, state = {published}, DOI = {10.1007/s00221-015-4323-8}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action}} } @Inproceedings{ ScheerBC2015_2, title = {On the Cognitive Demands of Different Controller Dynamics: A within-subject P300 Analysis}, year = {2015}, month = {9}, pages = {1042-1046}, abstract = {The cognitive workload of a steering task could reflect its demand on attentional as well as working memory resources under different conditions. These respective demands could be differentiated by evaluating components of the event-related potential (ERP) response to different types of stimulus probes, which are claimed to reflect the availability of either attention (i.e., novelty-P3) or working memory (i.e., target-P3) resources. Here, a within-subject analysis is employed to evaluate the robustness of ERP measurements in discriminating the cognitive demands of different steering conditions. We find that the amplitude of novelty-P3 ERPs to task-irrelevant environmental sounds is diminished when participants are required to perform a steering task. This indicates that steering places a demand on attentional resources. In addition, target-P3 ERPs to a secondary auditory detection task vary when the controller dynamics in the steering task are manipulated. This indicates that differences in controller dynamics vary in their working memory demands.}, web_url = {http://pro.sagepub.com/content/59/1/1042.full.pdf+html}, publisher = {Sage}, address = {London, UK}, event_name = {Human Factors and Ergonomics Society Annual Meeting (HFES 2015)}, event_place = {Los Angeles, CA, USA}, state = {published}, DOI = {10.1177/1541931215591294}, author = {Scheer M{mscheer}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ GlatzBC2015_2, title = {Warning Signals With Rising Profiles Increase Arousal}, year = {2015}, month = {9}, pages = {1011}, abstract = {Auditory warnings are often used to direct a user’s attention from a primary task to critical peripheral events. 
In the context of traffic, in-vehicle collision avoidance systems could, for example, employ spatially relevant sounds to alert the driver to the possible presence of a crossing pedestrian. This raises the question: What is an effective auditory alert in a steering environment? Ideally, such warning signals should not only arouse the driver but also result in deeper processing of the event that the driver is being alerted to. Warning signals can be designed to convey the time to contact with an approaching object (Gray, 2011). That is, sounds can rise in intensity in accordance with the physical velocity of an approaching threat. The current experiment was a manual steering task in which participants were occasionally required to recognize peripheral visual targets. These visual targets were sometimes preceded by a spatially congruent auditory warning signal. This was either a sound with constant intensity, linearly rising intensity, or non-linearly rising intensity that conveyed time-to-contact. To study the influence of warning cues on the arousal state, different features of electroencephalography (EEG) were measured. Alpha frequency, which ranges from 7.5 to 12.5 Hz, is believed to represent different cognitive processes, in particular arousal (Klimesch, 1999). That is, greater desynchronization in the alpha frequency reflects higher levels of attention as well as alertness. Our results showed a significant decrease in alpha power for sounds with rising intensity profiles, indicating increased alertness and expectancy for an event to occur. To analyze whether the increased arousal for rising sounds resulted in deeper processing of the visual target, we analyzed the event-related potential P3. It is a positive component that occurs approximately 300 ms after an event and is known to be associated with recognition performance of a stimulus (Parasuraman & Beatty, 1980). In other words, smaller P3 amplitudes indicate worse identification than larger amplitudes. Our results show that sounds with time-to-contact properties induced larger P3 responses to the targets that they cued compared to targets cued by constant or linearly rising sounds. 
This suggests that rising sounds with time-to-contact intensity profiles evoke deeper processing of the visual target and therefore result in better identification than events cued by sounds with linearly rising or constant intensity.}, web_url = {http://pro.sagepub.com/content/59/1/1011.full.pdf+html}, publisher = {Sage}, address = {London, UK}, event_name = {Human Factors and Ergonomics Society Annual Meeting (HFES 2015)}, event_place = {Los Angeles, CA, USA}, state = {published}, DOI = {10.1177/1541931215591402}, author = {Glatz C{cglatz}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ Paul2015, title = {Analysis on Body Perception and Distortion using Mixed Reality Environment}, year = {2015}, month = {8}, day = {17}, pages = {10}, web_url = {http://summerschool.igd-r.fraunhofer.de/summer_school_program_booklet.pdf}, event_name = {International Summer School on Visual Computing (VCSS 2015)}, event_place = {Rostock, Germany}, state = {published}, author = {Paul S{spaul}{Department Human Perception, Cognition and Action}} } @Conference{ FladBC2015_2, title = {Towards studying the influence of information channel properties on visual scanning processes}, year = {2015}, month = {8}, day = {17}, pages = {8}, web_url = {http://summerschool.igd-r.fraunhofer.de/summer_school_program_booklet.pdf}, event_name = {International Summer School on Visual Computing (VCSS 2015)}, event_place = {Rostock, Germany}, state = {published}, author = {Flad N{nflad}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ FladBC2015_3, title = {Combined use of eye-tracking and EEG to understand visual information processing}, year = {2015}, month = {8}, pages = {115-124}, web_url = {http://summerschool.igd-r.fraunhofer.de/index2.php}, editor = {Schulz, H.-J. , B. Urban, U. von Lukas}, publisher = {Fraunhofer Verlag}, address = {Stuttgart, Germany}, event_name = {International Summer School on Visual Computing (VCSS 2015)}, event_place = {Rostock, Germany}, state = {published}, ISBN = {978-3-8396-0960-6}, author = {Flad N{nflad}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ Chuang2015, title = {Error Visualization and Information-Seeking Behavior for Air-Vehicle Control}, year = {2015}, month = {7}, pages = {3-11}, abstract = {A control schema for a human-machine system allows the human operator to be integrated as a mathematical description in a closed-loop control system, i.e., a pilot in an aircraft. Such an approach typically assumes that error feedback is perfectly communicated to the pilot who is responsible for tracking a single flight variable. However, this is unlikely to be true in a flight simulator or a real flight environment. This paper discusses different aspects that pertain to error visualization and the pilot’s ability in seeking out relevant information across a range of flight variables.}, web_url = {http://link.springer.com/content/pdf/10.1007%2F978-3-319-20816-9_1.pdf}, editor = {Schmorrow, D.D. , C.M. 
Fidopiastis}, publisher = {Springer International Publishing}, address = {Cham, Switzerland}, series = {Lecture Notes in Artificial Intelligence ; 9183}, booktitle = {Foundations of Augmented Cognition}, event_name = {9th International Conference on Augmented Cognition (AC 2015), held as part of HCI International 2015}, event_place = {Los Angeles, CA, USA}, state = {published}, ISBN = {978-3-319-20815-2}, DOI = {10.1007/978-3-319-20816-9_1}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ SymeonidouOBC2015, title = {Direct haptic feedback benefits control performance during steering}, year = {2015}, month = {3}, day = {10}, pages = {249-250}, abstract = {Haptic feedback can be introduced in control devices to improve steering performance, such as in driving and flying scenarios. For example, direct haptic feedback (DHF) can be employed to guide the operator towards an optimal trajectory. It remains unclear how DHF magnitude could interact with user performance. A weak DHF might not be perceptible to the user, while a large DHF could result in overreliance. To assess the influence of DHF, five naive participants performed a compensatory tracking task across different DHF magnitudes. During the task, participants were seated in front of an artificial horizon display and were asked to compensate for externally induced disturbances in the roll dimension by manipulating a control joystick. Our results indicate that haptic feedback benefits steering performance across all tested DHF levels. This benefit increases linearly with increasing DHF magnitude. Interestingly, shared control performance was always inferior to the same DHF system without human input. This could be due to involuntary resistance that results from the arm dynamics.}, web_url = {https://www.teap.de/memory/TeaP_2015_Program2015-03-13.pdf}, event_name = {57th Conference of Experimental Psychologists (TeaP 2015)}, event_place = {Hildesheim, Germany}, state = {published}, author = {Symeonidou E-R{esymeonidou}{Department Human Perception, Cognition and Action}; Olivari M{molivari}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ FladBC2015, title = {Simultaneous EEG and eye-movement recording in a visual scanning task}, year = {2015}, month = {3}, day = {10}, pages = {81}, abstract = {Eye-movements can result in large artifacts in the EEG signal that could potentially obscure weaker cortically-based signals. Therefore, EEG studies are typically designed to minimize eye-movements [although see Plöchl et al., 2012; Dimigen et al., 2011]. We present methods for simultaneous EEG and eye-tracking recordings in a visual scanning task. Participants were required to serially attend to four areas-of-interest to detect a visual target. We compare EEG results, which were recorded either in the presence or absence of natural eye-movements. Furthermore, we demonstrate how natural eye-movement fixations can be reconstructed from the EOG signal, in a way that is comparable to the input from a simultaneous video-based eye-tracker. Based on these fixations, we address how EEG data can be segmented according to eye-movements (as opposed to experimentally timed stimuli). 
Finally, we explain how eye-movement-induced artifacts can be effectively removed via independent component analysis (ICA), which allows EEG components to be classified as having either a 'cortical' or 'non-cortical' origin. These methods offer the potential of measuring robust EEG signals even in the presence of natural eye-movements.}, web_url = {https://www.teap.de/memory/TeaP_2015_Program2015-03-13.pdf}, event_name = {57th Conference of Experimental Psychologists (TeaP 2015)}, event_place = {Hildesheim, Germany}, state = {published}, author = {Flad N{nflad}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ GlatzBC2015, title = {Sounds with time-to-contact properties are processed preferentially}, year = {2015}, month = {3}, day = {10}, pages = {93}, abstract = {Sounds with rising intensities are known to be more salient than their constant amplitude counterparts [Seifritz et al., 2002]. Incorporating a time-to-contact characteristic into the rising profile can further increase their perceived saliency [Gray, 2011]. We investigated whether looming sounds with this time-to-contact profile might be especially effective as warning signals. Nine volunteers performed a primary steering task whilst occasionally discriminating oriented Gabor patches that were presented in their visual periphery. These visual stimuli could be preceded by an auditory warning cue, 1 second before they appeared. The 2000 Hz tone could have an intensity profile that was either constant (65 dB), linearly rising (60 - 75 dB, ramped tone), or exponentially increasing (looming tone). Overall, warning cues resulted in significantly faster and more sensitive detections of the visual targets. More importantly, we found that EEG potentials to the looming tone were significantly earlier and sustained for longer, compared to both the constant and ramped tones. This suggests that looming sounds are processed preferentially because of time-to-contact cues rather than rising intensity alone.}, web_url = {https://www.teap.de/memory/TeaP_2015_Program2015-03-13.pdf}, event_name = {57th Conference of Experimental Psychologists (TeaP 2015)}, event_place = {Hildesheim, Germany}, state = {published}, author = {Glatz C{cglatz}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ ScheerBC2015, title = {Measuring workload during steering: A novelty-P3 study}, year = {2015}, month = {3}, day = {9}, pages = {220}, abstract = {The workload of a given task, such as steering, can be defined as the demand that it places on the limited attentional and cognitive resources of a driver. Given this, an increase in workload should reduce the amount of resources that are available for other tasks. For example, increasing workload in a primary steering task can decrease attention to oddball targets in a secondary auditory detection task. This can diminish the amplitude of its event-related potential (i.e., P3; Wickens et al., 1984). Here, we present a novel approach that does not require the participant to perform a secondary task. During steering, participants experienced a three-stimulus oddball paradigm, where pure tones were intermixed with infrequently presented, unexpected environmental sounds (e.g., cat meowing). 
Such sounds are known to elicit a subcomponent of the P3, namely the novelty-P3. The novelty-P3 reflects a passive shift of attention, which also applies to task-irrelevant events, thus removing the need for a secondary task (Ullsperger et al., 2001). We found that performing a manual steering task attenuated the amplitude of the novelty-P3 elicited by task-irrelevant novel sounds. The presented paradigm could be a viable approach to estimate workload in real-world scenarios.}, web_url = {https://www.teap.de/memory/TeaP_2015_Program2015-03-13.pdf}, event_name = {57th Conference of Experimental Psychologists (TeaP 2015)}, event_place = {Hildesheim, Germany}, state = {published}, author = {Scheer M{mscheer}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ ChuangNWB2015, title = {Learning anticipatory eye-movements for control}, year = {2015}, month = {3}, day = {9}, pages = {58}, abstract = {Anticipatory eye-movements (or look-ahead fixations) are often observed in complex closed-loop control tasks, such as steering a vehicle on a non-straight path (Land & Lee, 1994). This eye-movement behavior allows the observer to switch between different visual cues that are relevant for minimizing present and future control errors (Wilkie, Wann, & Allison, 2008). Here, we asked: Are anticipatory eye-movements generic or are they acquired according to the learning environment? We trained and tested 27 participants on a control system, which simulated the simplified dynamics of a rotorcraft. Participants had to translate laterally along a specified path while maintaining a fixed altitude. Ground and vertical landmarks provided respective visual cues. Training took place under one of three possible field-of-view conditions (height x width: 60° x 60°; 60° x 180°; 125° x 180°), while testing took place in an unrestricted field-of-view environment (125° x 230°). We found that restricting the field-of-view during training significantly decreases the number of anticipatory eye-movements during testing. This effect can be largely attributed to the size of the horizontal field-of-view. Our finding suggests that anticipatory eye-movements for closed-loop control are shaped by the conditions of the training environment.}, web_url = {https://www.teap.de/memory/TeaP_2015_Program2015-03-13.pdf}, event_name = {57th Conference of Experimental Psychologists (TeaP 2015)}, event_place = {Hildesheim, Germany}, state = {published}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Nieuwenhuizen FM{fmnieuwenhuizen}{Department Human Perception, Cognition and Action}; Walter J{jwalter}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ PaulM2015, title = {Animated self-avatars in immersive virtual reality for studying body perception and distortions}, year = {2015}, month = {3}, pages = {1-2}, abstract = {So far in my research studies with virtual reality I have focused on using body and hand motion tracking systems in order to animate different 3D self-avatars in immersive virtual reality environments (head-mounted displays or desktop virtual reality). We are using self-avatars to explore the following basic research question: what sensory information is used to perceive one's body dimensions? 
We also ask the applied question of how we can best create a calibrated self-avatar for efficient use in first-person immersive head-mounted display interaction scenarios. The self-avatar used for such research questions and applications has to be precise and easy to use, and it must enable the virtual hand and body to interact with physical objects. This is what my research has focused on thus far and what I am developing to complete the first year of my graduate studies. We plan to use LEAP motion for hand and arm movements, the Moven Inertial Measurement suit for full body tracking, and the Oculus DK2 head-mounted display. A several-step process of setting up and calibrating an animated self-avatar with full body motion and hand tracking is described in this paper. First, the user's dimensions will be measured and they will be given a self-avatar with these dimensions; they will then be asked to perform pre-determined actions (i.e. touching objects, walking in a specific trajectory); we will then estimate in real time how precisely the animated body and body parts match the real-world reference objects; and finally a scaling of the avatar size or retargeting of the motion is performed in order to meet a specific minimum error requirement.}, web_url = {https://www.researchgate.net/publication/284437866_Animated_self-avatars_in_immersive_virtual_reality_for_studying_body_perception_and_distortions}, event_name = {IEEE VR Doctoral Consortium 2015}, event_place = {Arles, France}, state = {published}, author = {Paul S{spaul}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}} } @Article{ BrowatzkiTMBW2014, title = {Active In-Hand Object Recognition on a Humanoid Robot}, journal = {IEEE Transactions on Robotics}, year = {2014}, month = {10}, volume = {30}, number = {5}, pages = {1260-1269}, abstract = {For any robot, the ability to recognize and manipulate unknown objects is crucial to successfully work in natural environments. Object recognition and categorization is a very challenging problem, as 3-D objects often give rise to ambiguous, 2-D views. Here, we present a perception-driven exploration and recognition scheme for in-hand object recognition implemented on the iCub humanoid robot. In this setup, the robot actively seeks out object views to optimize the exploration sequence. This is achieved by regarding the object recognition problem as a localization problem. We search for the most likely viewpoint position on the viewsphere of all objects. This problem can be solved efficiently using a particle filter that fuses visual cues with associated motor actions. Based on the state of the filter, we can predict the next best viewpoint after each recognition step by searching for the action that leads to the highest expected information gain. We conduct extensive evaluations of the proposed system in simulation as well as on the actual robot and show the benefit of perception-driven exploration over passive, vision-only processes at discriminating between highly similar objects. 
We demonstrate that objects are recognized faster and, at the same time, with higher accuracy.}, web_url = {http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6840371}, state = {published}, DOI = {10.1109/TRO.2014.2328779}, author = {Browatzki B{browatbn}{Department Human Perception, Cognition and Action}; Tikhanoff V; Metta G; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}} } @Thesis{ Browatzki2014, title = {Multimodal object perception for robotics}, year = {2014}, month = {10}, web_url = {http://elib.uni-stuttgart.de/handle/11682/4627}, web_url2 = {http://dx.doi.org/10.18419/opus-4610}, state = {published}, type = {PhD}, author = {Browatzki B{browatbn}{Department Human Perception, Cognition and Action}} } @Inproceedings{ ScheerBC2014, title = {Is the novelty-P3 suitable for indexing mental workload in steering tasks?}, journal = {Cognitive Processing}, year = {2014}, month = {9}, volume = {15}, number = {Supplement 1}, pages = {S135-S137}, abstract = {Difficulties experienced in steering a vehicle can be expected to place a demand on one’s mental resources (O’Donnell, Eggemeier 1986). While the extent of this mental workload (MWL) can be estimated by self-reports (e.g., NASA-TLX; Hart, Staveland 1988), it can also be physiologically evaluated in terms of how a primary task taxes a common and limited pool of mental resources, to the extent that it reduces the electroencephalographic (EEG) responses to a secondary task (e.g. an auditory oddball task). For example, the participant could be primarily required to control a cursor to track a target while attending to a series of auditory stimuli, which would infrequently present target tones that should be responded to with a button-press (e.g., Wickens, Kramer, Vanasse and Donchin 1983). Infrequently presented targets, termed oddballs, are known to elicit a large positive potential approximately 300 ms after their presentation (i.e., P3). Indeed, increasing tracking difficulty either by decreasing the predictability of the tracked target or by changing the complexity of the controller dynamics has been shown to attenuate P3 responses in the secondary auditory monitoring task (Wickens et al. 1983; Wickens, Kramer and Donchin 1984). In contrast, increasing tracking difficulty—by introducing more frequent direction changes of the tracked target (i.e. including higher frequencies in the function that describes the motion trajectory of the target)—has been shown to bear little influence on the secondary task’s P3 response (Wickens, Israel and Donchin 1977; Isreal, Chesney, Wickens and Donchin 1980). Overall, the added requirement of a steering task consistently results in a lower P3 amplitude, relative to performing auditory monitoring alone (Wickens et al. 1983; Wickens et al. 1977; Isreal et al. 1980). Using a dual-task paradigm for indexing workload is not ideal. First, it requires participants to perform a secondary task. This prevents it from being applied in real-world scenarios; users cannot be expected to perform an unnecessary task that could compromise their critical work performance. Second, it can only be expected to work if the performance of the secondary task relies on the same mental resources as those of the primary task (Wickens, Yeh 1983), requiring a deliberate choice of the secondary task. 
Thus, it is fortunate that more recent studies have demonstrated that P3 amplitudes can be sensitive to MWL, even if the auditory oddball is ignored (Ullsperger, Freude and Erdmann 2001; Allison, Polich 2008). This effect is said to induce a momentary and involuntary shift in general attention, especially if recognizable sounds (e.g. a dog bark, as opposed to a pure tone) are used (Miller, Rietschel, McDonald and Hatfield 2011). The current work, containing two experiments, investigates the conditions that would allow the ‘novelty-P3’, the P3 elicited by the ignored, recognizable oddball, to be an effective index for the MWL of compensatory tracking. Compensatory tracking is a basic steering task that can be generalized to most implementations of vehicular control. In both experiments participants were required to use a joystick to counteract disturbances of a horizontal plane. To evaluate the generalizability of this paradigm, we depicted this horizontal plane as either a line in a simplified visualization or as the horizon in a real-world environment. In the latter, participants experienced a large field-of-view perspective of the outside world from the cockpit of an aircraft that rotated erratically about its heading axis. The task was the same regardless of the visualization. In both experiments, we employed a full factorial design for the visualization (instrument, world) and 3 oddball paradigms (in experiment 1) or 4 levels of task difficulty (in experiment 2) respectively. Two sessions were conducted on separate days for the different visualizations, which were counter-balanced for order. Three trials were presented per oddball paradigm (experiment 1) or level of task difficulty (experiment 2) in blocks, which were randomized for order. Overall, we found that steering performance was worse when the visualization was provided by a realistic world environment in experiments 1 (F(1, 11) = 42.8, p < 0.01) and 2 (F(1, 13) = 35.0, p < 0.01). Nonetheless, this manipulation of visualization had no consequence on our participants’ MWL as evaluated by a post-experimental questionnaire (i.e., NASA-TLX) and EEG responses. This suggests that MWL was unaffected by our choice of visualization. The first experiment, with 12 participants, was designed to identify the optimal presentation paradigm of the auditory oddball. For the EEG analysis, two participants had to be excluded due to noisy electrophysiological recordings (more than 50 % of epochs rejected). Whilst performing the tracking task, participants were presented with a sequence of auditory stimuli that they were instructed to ignore. This sequence would, in the 1-stimulus paradigm, only contain the infrequent oddball stimulus (i.e., the familiar sound of a dog’s bark (Fabiani, Kazmerski, Cycowicz and Friedmann 1996)). In the 2-stimulus paradigm, this infrequently presented oddball (0.1) is accompanied by a more frequently presented pure tone (0.9), and in the 3-stimulus paradigm the infrequently presented oddball (0.1) is accompanied by a more frequently presented pure tone (0.8) and an infrequently presented pure tone (0.1). These three paradigms are widely used in P3 research (Katayama, Polich 1996). It should be noted, however, that the target-to-target interval is 20 s regardless of the paradigm. To obtain the ERPs, the epochs from 100 ms before to 900 ms after the onset of the recognizable oddball stimulus were averaged. 
Mean amplitude measurements were obtained in a 60 ms window, centered at the group-mean peak latency for the largest positive maximum component between 250 and 400 ms for the oddball P3, for each of the three mid-line electrode channels of interest (i.e., Fz, Cz, Pz). In agreement with previous work, the novelty-P3 response was smaller when participants had to perform the tracking task compared to when they were only presented with the task-irrelevant auditory stimuli, without the tracking task (F(1, 9) = 10.9, p < 0.01). However, the amplitude of the novelty-P3 differed significantly across the presentation paradigms (F(2, 18) = 5.3, p < 0.05), whereby the largest response to our task-irrelevant stimuli was elicited by the 1-stimulus oddball paradigm. This suggests that the 1-stimulus oddball paradigm is most likely to elicit novelty-P3s that are sensitive to changes in MWL. Finally, the attenuation of novelty-P3 amplitudes by the tracking task varied across the three mid-line electrodes (F(2, 18) = 28.0, p < 0.001). Pairwise comparisons, Bonferroni-corrected for multiple comparisons, revealed P3 amplitude to be largest at Cz, followed by Fz, and smallest at Pz (all p < 0.05). This stands in contrast with previous work that found control difficulty to attenuate P3 responses in parietal electrodes (cf., Isreal et al. 1980; Wickens et al. 1983). Thus, the current paradigm that uses a recognizable, ignored sound is likely to reflect an underlying process that is different from previous studies, which could be more sensitive to the MWL demands of a tracking task. Given the result of experiment 1, the second experiment, with 14 participants, investigated whether the 1-stimulus oddball paradigm would be sufficiently sensitive to index tracking difficulty as defined by the bandwidth of frequencies that contributed to the disturbance of the horizontal plane (cf., Isreal et al. 1980). Three different bandwidth profiles (easy, medium, hard) defined the linear increase in the amount of disturbance that had to be compensated for. This manipulation was effective in increasing subjective MWL, according to the results of a post-experimental NASA-TLX questionnaire (F(2, 26) = 14.9, p < 0.001), and demonstrated the expected linear trend (F(1, 13) = 23.2, p < 0.001). This increase in control effort was also reflected in the amount of joystick activity, which grew linearly across the difficulty conditions (F(1, 13) = 42.2, p < 0.001). For the EEG analysis, two participants had to be excluded due to noisy electrophysiological recordings (more than 50 % of epochs rejected). A planned contrast revealed that the novelty-P3 was significantly lower in the most difficult condition compared to the baseline viewing condition, where no tracking was done (F(1, 11) = 5.2, p < 0.05; see Fig. 1a). Nonetheless, novelty-P3 did not differ significantly between the difficulty conditions (F(2, 22) = 0.13, p = 0.88), nor did it show the expected linear trend (F(1, 11) = 0.02, p = 0.91). Like Isreal et al. (1980), we find that EEG responses do not discriminate for MWL that is associated with controlling increased disturbances. It remains to be investigated whether the novelty-P3 is sensitive to the complexity of controller dynamics, as has been shown for the P3. The power spectral density of the EEG data around 10 Hz (i.e., alpha) has been suggested by Smith and Gevins (2005) to index MWL. 
A post hoc analysis of our current data, at electrode Pz, revealed that alpha power was significantly lower for the medium and hard conditions, relative to the view-only condition (F(1, 11) = 6.081, p < 0.05; F(1, 11) = 6.282, p < 0.05). Nonetheless, the expected linear trend across tracking difficulty was not significant (Fig. 1b). To conclude, the current results suggest that a 1-stimulus oddball task ought to be preferred when measuring general MWL with the novelty-P3. Although changes in novelty-P3 can identify the control effort required in our compensatory tracking task, it is not sufficiently sensitive to provide a graded response across different levels of disturbances. In this regard, it may not be as effective as self-reports and joystick activity in denoting control effort. Nonetheless, further research can improve upon the sensitivity of EEG metrics to MWL by investigating other aspects that better correlate with the specific demands of a steering task.}, web_url = {http://link.springer.com/content/pdf/10.1007%2Fs10339-014-0632-2.pdf}, editor = {Butz, M.V.}, publisher = {Springer}, address = {Berlin, Germany}, event_name = {12th Biannual Conference of the German Cognitive Science Society (KogWis 2014)}, event_place = {Tübingen, Germany}, state = {published}, DOI = {10.1007/s10339-014-0632-2}, author = {Scheer M{mscheer}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ GlatzBC2014, title = {Looming auditory warnings initiate earlier event-related potentials in a manual steering task}, journal = {Cognitive Processing}, year = {2014}, month = {9}, volume = {15}, number = {Supplement 1}, pages = {S38}, abstract = {Automated collision avoidance systems promise to reduce accidents and relieve the driver from the demands of constant vigilance. Such systems direct the operator’s attention to potentially critical regions of the environment without compromising steering performance. This raises the question: What is an effective warning cue? Sounds with rising intensities are claimed to be especially salient. By evoking the percept of an approaching object, they engage a neural network that supports auditory space perception and attention (Bach et al. 2008). Indeed, we are aroused by and faster to respond to ‘looming’ auditory tones, which increase heart rate and skin conductance activity (Bach et al. 2009). Looming sounds can differ in terms of their rising intensity profiles. While looming can be approximated by a sound whose amplitude increases linearly with time, an approaching object that emits a constant tone is better described as having an amplitude that increases exponentially with time. In a driving simulator study, warning cues that had a veridical looming profile induced earlier braking responses than ramped profiles with linearly increasing loudness (Gray 2011). In the current work, we investigated how looming sounds might serve, during a primary steering task, to alert participants to the appearance of visual targets. Nine volunteers performed a primary steering task whilst occasionally discriminating visual targets. Their primary task was to minimize the vertical distance between an erratically moving cursor and the horizontal mid-line, by steering a joystick towards the latter. 
Occasionally, diagonally oriented Gabor patches (10° tilt; 1° diameter; 3.1 cycles/deg; 70 ms duration) would appear on either the left or right of the cursor. Participants were instructed to respond with a button-press whenever a pre-defined target appeared. Seventy percent of the time, these visual stimuli were preceded by a 1,500 ms warning tone, 1,000 ms before they appeared. Overall, warning cues resulted in significantly faster and more sensitive detections of the visual target stimuli (F(1,8) = 7.72, p < 0.05; F(1,8) = 9.63, p < 0.05). Each trial would present one of three possible warning cues. Thus, a warning cue (2,000 Hz) could be a tone with a constant intensity of 65 dB, a ramped tone with linearly increasing intensity from 60 dB to approximately 75 dB, or a comparable looming tone with an exponentially increasing intensity profile. The different warning cues did not vary in their influence on the response times to the visual targets and recognition sensitivity (F(2,16) = 3.32, p = 0.06; F(2,16) = 0.10, p = 0.90). However, this might be due to our small sample size. It is noteworthy that the different warning tones did not adversely affect steering performance (F(2,16) = 1.65, p < 0.22). Nonetheless, electroencephalographic potentials to the offset of the warning cues were significantly earlier for the looming tone, compared to both the constant and ramped tones. More specifically, the positive component of the event-related potential was significantly earlier for the looming tone by about 200 ms, relative to the constant and ramped tones, and sustained for a longer duration (see Fig. 1). The current findings highlight the behavioral benefits of auditory warning cues. More importantly, we find that a veridical looming tone induces earlier event-related potentials than one with a linearly increasing intensity. Future work will investigate how this benefit might diminish with increasing time between the warning tone and the event that is cued for.}, web_url = {http://link.springer.com/content/pdf/10.1007%2Fs10339-014-0632-2.pdf}, event_name = {12th Biannual Conference of the German Cognitive Science Society (KogWis 2014)}, event_place = {Tübingen, Germany}, state = {published}, DOI = {10.1007/s10339-014-0632-2}, author = {Glatz C{cglatz}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ SymeonidouOBC2014, title = {The Role of Direct Haptic Feedback in a Compensatory Tracking Task}, journal = {Cognitive Processing}, year = {2014}, month = {9}, volume = {15}, number = {Supplement 1}, pages = {S71}, abstract = {Haptic feedback systems can be designed to assist vehicular steering by sharing manual control with the human operator. For example, direct haptic feedback (DHF) forces, which are applied over the control device, can guide the operator towards an optimized trajectory, which he can either augment, comply with, or resist according to his preferences. DHF has been shown to improve performance (Olivari et al. submitted) and increase safety (Tsoi et al. 2010). Nonetheless, the human operator may not always benefit from the haptic support system. Depending on the amount of the haptic feedback, the operator might demonstrate an over-reliance or an opposition to this haptic assistance (Forsyth and MacLean 2006). Thus, it is worthwhile to investigate how different levels of haptic assistance influence shared control performance. 
The current study investigates how different gain levels of DHF influence performance in a compensatory tracking task. For this purpose, 6 participants were evenly divided into two groups according to their previous tracking experience. During the task, they had to compensate for externally induced disturbances that were visualized as the difference between a moving line and a horizontal reference standard. Briefly, participants observed how an unstable aircraft symbol, located in the middle of the screen, deviated in the roll axis from a stable artificial horizon. In order to compensate for the roll angle, participants were instructed to use the control joystick. Meanwhile, different DHF forces were presented over the control joystick for gain levels of 0, 12.5, 25, 50 and 100 %. The maximal DHF level was chosen according to the procedure described by Olivari et al. (2014) and represents the best stable performance of skilled human operators. The participants’ performance was defined as the reciprocal of the median of the root mean square error (RMSE) in each condition. Figure 1a shows that performance improved with increasing DHF gain, regardless of experience levels. To evaluate the operator’s contribution relative to the DHF contribution, we calculated the ratio of overall performance to estimated DHF performance without human input. Figure 1b shows that the subjects’ contribution in both groups decreased with increasing DHF up to the 50 % condition. The contribution of experienced subjects plateaued between the 50 and 100 % DHF levels. Thus, the increase in performance for the 100 % condition can mainly be attributed to the higher DHF forces alone. In contrast, the inexperienced subjects seemed to completely rely on the DHF during the 50 % condition, since the operator’s contribution approximated 1. However, this changed for the 100 % DHF level. Here, the participants started to actively contribute to the task (operator’s contribution > 1). 
This change in behavior resulted in performance values similar to those of the experienced group. Our findings suggest that increasing haptic support with our DHF system does not necessarily result in over-reliance and can improve performance for both experienced and inexperienced subjects.}, web_url = {http://link.springer.com/content/pdf/10.1007%2Fs10339-014-0632-2.pdf}, event_name = {12th Biannual Conference of the German Cognitive Science Society (KogWis 2014)}, event_place = {Tübingen, Germany}, state = {published}, DOI = {10.1007/s10339-014-0632-2}, author = {Symeonidou E-R{esymeonidou}{Department Human Perception, Cognition and Action}; Olivari M{molivari}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ Chuang2014_2, title = {Seeking and Processing Information During Steering}, year = {2014}, month = {7}, day = {9}, web_url = {http://docplayer.net/15633537-Crc-940-related-lectures-and-talks-from-2012-to-2016.html}, event_name = {TUD Fachrichtung Psychologie: Bühler-Kolloquium}, event_place = {Dresden, Germany}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ Chuang2014, title = {Understanding the Human Operator in Man-Machine Systems for Closed-Loop Control Behavior}, year = {2014}, month = {6}, day = {29}, web_url = {http://brain.korea.ac.kr/bce2014/?m=program}, event_name = {6th International Conference on Brain and Cognitive Engineering (BCE 2014)}, event_place = {Tübingen, Germany}, state = {published}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ FladNBC2014, title = {System Delay in Flight Simulators Impairs Performance and Increases Physiological Workload}, year = {2014}, month = {6}, pages = {3-11}, abstract = {Delays between user input and the system’s reaction in control tasks have been shown to have a detrimental effect on performance. This is often accompanied by increases in self-reported workload. In the current work, we sought to identify physiological measures that correlate with pilot workload in a conceptual aerial vehicle that suffered from varying time delays between control input and vehicle response. For this purpose, we measured the skin conductance and heart rate variability of 8 participants during flight maneuvers in a fixed-base simulator. Participants were instructed to land a vehicle while compensating for roll disturbances under different conditions of system delay. We found that control error and the self-reported workload increased with increasing time delay. Skin conductance and input behavior also reflected corresponding changes. 
Our results show that physiological measures are sufficiently robust for evaluating the adverse influence of system delays in a conceptual vehicle model.}, web_url = {http://link.springer.com/content/pdf/10.1007%2F978-3-319-07515-0_1.pdf}, editor = {Harris, D.}, publisher = {Springer}, address = {Berlin, Germany}, series = {Lecture Notes in Artificial Intelligence ; 8532}, booktitle = {Engineering Psychology and Cognitive Ergonomics}, event_name = {11th International Conference on Engineering Psychology and Cognitive Ergonomics (EPCE 2014)}, event_place = {Heraklion, Greece}, state = {published}, ISBN = {978-3-319-07514-3}, DOI = {10.1007/978-3-319-07515-0_1}, author = {Flad N{nflad}{Department Human Perception, Cognition and Action}; Nieuwenhuizen FM{fmnieuwenhuizen}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ ScheerNBC2014, title = {The Influence of Visualization on Control Performance in a Flight Simulator}, year = {2014}, month = {6}, pages = {202-211}, abstract = {Flight simulators are often assessed in terms of how well they imitate the physical reality that they endeavor to recreate. Given that vehicle simulators are primarily used for training purposes, it is equally important to consider the implications of visualization in terms of its influence on the user’s control performance. In this paper, we report that a complex and realistic visual world environment can result in larger performance errors compared to a simplified, yet equivalent, visualization of the same control task. This is accompanied by an increase in subjective workload. A detailed analysis of control performance indicates that this is because the error perception is more variable in a real world environment.}, web_url = {http://link.springer.com/content/pdf/10.1007%2F978-3-319-07515-0_21.pdf}, editor = {Harris, D.}, publisher = {Springer}, address = {Berlin, Germany}, series = {Lecture Notes in Artificial Intelligence ; 8532}, booktitle = {Engineering Psychology and Cognitive Ergonomics}, event_name = {11th International Conference on Engineering Psychology and Cognitive Ergonomics (EPCE 2014), held as Part of HCI International 2014}, event_place = {Heraklion, Greece}, state = {published}, ISBN = {978-3-319-07514-3}, DOI = {10.1007/978-3-319-07515-0_21}, author = {Scheer M{mscheer}{Department Human Perception, Cognition and Action}; Nieuwenhuizen FM{fmnieuwenhuizen}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ SymeonidouOBC2014_2, title = {The Role of Direct Haptic Feedback in a Compensatory Tracking Task}, year = {2014}, month = {6}, web_url = {http://brain.korea.ac.kr/bce2014/?m=program}, event_name = {6th International Conference on Brain and Cognitive Engineering (BCE 2014)}, event_place = {Tübingen, Germany}, state = {published}, author = {Symeonidou E-R{esymeonidou}{Department Human Perception, Cognition and Action}; Olivari M{molivari}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ ChuangFSNB2014, title = {Closed-loop control performance and workload in a flight simulator}, year = {2014}, month = {4}, day = {1}, volume = {56}, 
pages = {45}, abstract = {In closed-loop control tasks (e.g., flying), the human operator is required to continuously monitor visual feedback, so as to evaluate the consequence of his actions and to correct them according to his goal. A flight simulator environment allows us to evaluate the influence of control challenges such as visual feedback delays and control disturbances without endangering the human operator. In addition, a stable simulator environment allows for more robust eye-movement and physiological recordings, which would be difficult to obtain in an actual test-flight. Eye-movement recordings can reveal the aspects of visual information that is relied on for the execution of certain maneuvers. Meanwhile, electrophysiological recordings for heart-based and skin conductance activity as well as EEG can reflect aspects of operator workload. My talk will present work on how visual feedback visualization and latency influence both control performance and workload. This will exemplify how control behavior in a flight simulator differs from that of a comparable compensatory tracking task. In doing so, I will convey the benefits and technical challenges involved in performing behavioral studies in a fixed-base flight simulator that is suitable for evaluating closed-loop control performance, eye-movement behavior and physiological recordings.}, web_url = {https://www.teap.de/memory/TeaP_Abstracts_20140219.pdf}, event_name = {56th Conference of Experimental Psychologists (TeaP 2014)}, event_place = {Giessen, Germany}, state = {published}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Flad N{nflad}{Department Human Perception, Cognition and Action}; Scheer M{mscheer}{Department Human Perception, Cognition and Action}; Nieuwenhuizen FM{fmnieuwenhuizen}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ BrowatzkiBC2014, title = {A comparison of geometric- and regression-based mobile gaze-tracking}, journal = {Frontiers in Human Neuroscience}, year = {2014}, month = {4}, volume = {8}, number = {200}, pages = {1-12}, abstract = {Video-based gaze-tracking systems are typically restricted in terms of their effective tracking space. This constraint limits the use of eyetrackers in studying mobile human behavior. Here, we compare two possible approaches for estimating the gaze of participants who are free to walk in a large space whilst looking at different regions of a large display. Geometrically, we linearly combined eye-in-head rotations and head-in-world coordinates to derive a gaze vector and its intersection with a planar display, by relying on the use of a head-mounted eyetracker and body-motion tracker. Alternatively, we employed Gaussian process regression to estimate the gaze intersection directly from the input data itself. Our evaluation of both methods indicates that a regression approach can deliver comparable results to a geometric approach. The regression approach is favored, given that it has the potential for further optimization, provides confidence bounds for its gaze estimates and offers greater flexibility in its implementation.
Open-source software for the methods reported here is also provided for user implementation.}, web_url = {http://journal.frontiersin.org/Journal/10.3389/fnhum.2014.00200/abstract}, state = {published}, DOI = {10.3389/fnhum.2014.00200}, author = {Browatzki B{browatbn}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ FladC2014, title = {Setting up a high-fidelity flight simulator to study closed-loop control and physiological workload}, year = {2014}, month = {3}, web_url = {http://www.interdisciplinary-college.de/previous-iks?id=21}, event_name = {Interdisciplinary College: Cognition 3.0 - the social mind in the connected world (IK 2014)}, event_place = {Günne, Germany}, state = {published}, author = {Flad N{nflad}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Thesis{ Bieg2014_2, title = {On the coordination of saccades with hand and smooth pursuit eye movements}, year = {2014}, month = {2}, web_url = {https://bibliographie.uni-tuebingen.de/xmlui/handle/10900/51113}, state = {published}, type = {PhD}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}} } @Book{ Bieg2014, title = {On the coordination of saccades with hand and smooth pursuit eye movements}, year = {2014}, pages = {130}, abstract = {Saccades are rapid eye movements that relocate the fovea, the retinal area with highest acuity, to fixate different points in the visual field in turn. Where and when the eyes shift needs to be tightly coordinated with our behavior. The current thesis investigates how this coordination is achieved. Part I examines the coordination of eye and hand movements. Previous studies suggest that the neural processes that coordinate saccades and hand movements do so by adjusting the onset time and movement speed of saccades. I argue against this hypothesis by showing that the need to process task-relevant visual information at the saccade endpoint is sufficient to cause such adjustments. Rather than a mechanism to coordinate the eyes with the hands, changes in saccade onset time and speed may reflect the increased importance of vision at a saccade's target location. Part II examines the coordination of smooth pursuit and saccadic eye movements. Smooth pursuit eye movements are slow eye movements that follow a moving object of interest. The eyes frequently alternate between smooth pursuit and saccadic eye movements, which suggests that their control processes are closely coupled. In support of this idea, smooth pursuit eye movements are shown to systematically influence the onset time of saccadic eye movements. This influence may rest on two different mechanisms: first, a bias in visual attention in the direction of pursuit for saccades that occur during smooth pursuit; second, a mechanism that inhibits the saccadic response in the case of saccades to a moving target. 
Evidence for the latter hypothesis is provided by the observation that both the probability of occurrence and the latency of saccades to a moving target depend on the target's eccentricity and velocity.}, note = {Tübingen, Univ., Diss., 2014}, web_url = {http://www.logos-verlag.de/cgi-bin/buch/isbn/3648}, publisher = {Logos Verlag}, address = {Berlin, Germany}, series = {MPI Series in Biological Cybernetics ; 37}, state = {published}, type = {PhD}, ISBN = {978-3-8325-3648-0}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}} } @Inproceedings{ NieuwenhuizenCB2013, title = {myCopter: Enabling Technologies for Personal Aerial Transportation Systems: Project status after 2.5 years}, year = {2013}, month = {11}, pages = {1-3}, abstract = {Current means of transportation for daily commuting are reaching their limits during peak travel times, which results in waste of fuel and loss of time and money. A recent study commissioned by the European Union considers a personal aerial transportation system (PATS) as a viable alternative for transportation to and from work. It also acknowledges that developing such a transportation system should not focus on designing a new flying vehicle for personal use, but instead on investigating issues surrounding the implementation of the transportation system itself. This is the aim of European project myCopter: to determine the social and technological aspects needed to set up a transportation system based on personal aerial vehicles (PAVs). The project focuses on three research areas: human-machine interfaces and training, automation technologies, and social acceptance. Our extended abstract for inclusion in the conference proceedings and our presentation will focus on the achievements during the first 2.5 years of the 4-year project. These include the development of an augmented dynamic model of a PAV with excellent handling qualities that are suitable for training purposes. The training requirements for novice pilots are currently under development. Experimental evaluations on haptic guidance and human-in-the-loop control tasks have allowed us to start implementing a haptic Highway-in-the-Sky display to support novice pilots and to investigate metrics for objectively determining workload using psychophysiological measurements. Within the project, developments for automation technologies have focused on vision-based algorithms. We have integrated such algorithms in the control and navigation architecture of unmanned aerial vehicles (UAVs). Detecting suitable landing spots from monocular camera images recorded in flight has proven to reliably work off-line, but further work is required to be able to use this approach in real time. Furthermore, we have built multiple low-cost UAVs and equipped them with radar sensors to test collision avoidance strategies in real flight. Such algorithms are currently under development and will take inspiration from crowd simulations. Finally, using technology assessment methodologies, we have assessed potential markets for PAVs and challenges for its integration into the current transportation system. This will lead to structured discussions on expectations and requirements of potential PAV users.}, file_url = {fileadmin/user_upload/files/publications/2013/HeliWorld-2013-Nieuwenhuizen.pdf}, publisher = {Airtec GmbH}, address = {Frankfurt a. Main, Germany}, event_name = {5.
Internationale HELI World Konferenz "HELICOPTER Technologies", "HELICOPTER Operations" at the International Aerospace Supply Fair AIRTEC 2013}, event_place = {Frankfurt a.M., Germany}, state = {published}, ISBN = {978-3-942939-10-2}, author = {Nieuwenhuizen FM{fmnieuwenhuizen}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ Bieg2013, title = {Oculomotor Decisions: Saccadic or Smooth Response?}, year = {2013}, month = {10}, day = {1}, volume = {14}, pages = {19}, web_url = {http://www.cin.uni-tuebingen.de/fileadmin/content/05_News_%26_Events/Conferences/Conference_130930_NeNa_2013.pdf}, event_name = {14th Conference of Junior Neuroscientists of Tübingen (NeNa 2013)}, event_place = {Schramberg, Germany}, state = {published}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}} } @Article{ BiegBBC2013, title = {Saccade reaction time asymmetries during task-switching in pursuit tracking}, journal = {Experimental Brain Research}, year = {2013}, month = {10}, volume = {230}, number = {3}, pages = {271-281}, abstract = {We investigate how smooth pursuit eye movements affect the latencies of task-switching saccades. Participants had to alternate their foveal vision between a continuous pursuit task in the display center and a discrete object discrimination task in the periphery. The pursuit task was either carried out by following the target with the eyes only (ocular) or by steering an on-screen cursor with a joystick (oculomanual). We measured participants’ saccadic reaction times (SRTs) when foveal vision was shifted from the pursuit task to the discrimination task and back to the pursuit task. Our results show asymmetries in SRTs depending on the movement direction of the pursuit target: SRTs were generally shorter in the direction of pursuit. Specifically, SRTs from the pursuit target were shorter when the discrimination object appeared in the motion direction. SRTs to pursuit were shorter when the pursuit target moved away from the current fixation location. This result was independent of the type of smooth pursuit behavior that was performed by participants (ocular/oculomanual). The effects are discussed in regard to asymmetries in attention and processes that suppress saccades at the onset of pursuit.}, web_url = {http://link.springer.com/content/pdf/10.1007%2Fs00221-013-3651-9.pdf}, state = {published}, DOI = {10.1007/s00221-013-3651-9}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ ChuangNB2013, title = {A Fixed-Based Flight Simulator Study: The Interdependence of Flight Control Performance and Gaze Efficiency}, year = {2013}, month = {7}, pages = {95-104}, abstract = {Here, a descriptive study is reported that addresses the relationship between flight control performance and instrument scanning behavior. This work was performed in a fixed-based flight simulator. It targets the ability of untrained novices to pilot a lightweight rotorcraft in a flight scenario that consisted of fundamental mission task elements such as speed and altitude changes. 
The results indicate that better control performance occurs when gaze is more selective for and focused on key instruments. Ideal instrument scanning behavior is proposed and its relevance for training instructions and visual instrument design is discussed.}, file_url = {fileadmin/user_upload/files/publications/2013/HCI-I-2013-Chuang.pdf}, web_url = {http://link.springer.com/content/pdf/10.1007%2F978-3-642-39354-9.pdf}, editor = {Harris, D.}, publisher = {Springer}, address = {Berlin, Germany}, series = {Lecture Notes in Computer Science ; 8020}, booktitle = {Engineering Psychology and Cognitive Ergonomics: Applications and Services}, event_name = {10th International Conference EPCE 2013, Held as Part of HCI International 2013}, event_place = {Las Vegas, NV, USA}, state = {published}, ISBN = {978-3-642-39353-2}, DOI = {10.1007/978-3-642-39354-9_11}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Nieuwenhuizen FM{fmnieuwenhuizen}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ BiegBC2013, title = {Attentional Biases during Steering Behavior}, year = {2013}, month = {7}, pages = {21-27}, abstract = {In the current study, we examine eye movements of human operators during a combined steering and discrimination task. In this task, observers had to alternate their gaze between a central steering task and a discrimination task in the periphery. Our results show that the observer’s gaze behavior is influenced by the motion direction of the steering task. Saccade reaction times (SRTs) of saccades to the discrimination target were shorter if the target appeared in the steering direction. SRTs back to the steering task were shorter when the steering target moved away from the discrimination target. These effects are likely the result of motion-related attention shifts and an interaction of the saccadic and smooth pursuit eye movement system.}, web_url = {http://link.springer.com/content/pdf/10.1007%2F978-3-642-39173-6.pdf}, editor = {Duffy, V.G.}, publisher = {Springer}, address = {Berlin, Germany}, series = {Lecture Notes in Computer Science ; 8020}, booktitle = {Digital Human Modeling and Applications in Health, Safety, Ergonomics, and Risk Management: Healthcare and Safety of the Environment and Transport}, event_name = {4th International Conference DHM 2013, Held as Part of HCI International 2013}, event_place = {Las Vegas, NV, USA}, state = {published}, ISBN = {978-3-642-39172-9}, DOI = {10.1007/978-3-642-39173-6_3}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Article{ BonevCE2012, title = {How do image complexity, task demands and looking biases influence human gaze behavior?}, journal = {Pattern Recognition Letters}, year = {2013}, month = {5}, volume = {34}, number = {7}, pages = {723–730}, abstract = {In this paper we propose an information-theoretic approach to understand eye-movement patterns, in relation to the task performed and image complexity. We commence with the analysis of the distributions and amplitudes of eye-movement saccades, performed across two different image-viewing tasks: free viewing and visual search. Our working hypothesis is that the complexity of image information and task demands should interact.
This should be reflected in the Markovian pattern of short and long saccades. We compute high-order Markovian models of performing a large saccade after many short ones and also propose a novel method for quantifying image complexity. The analysis of the interaction between high-order Markovianity, task and image complexity supports our hypothesis.}, web_url = {http://www.sciencedirect.com/science/article/pii/S0167865512001687}, state = {published}, DOI = {10.1016/j.patrec.2012.05.007}, author = {Bonev B; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Escolano F} } @Article{ SonFLKBR2012, title = {Human-Centered Design and Evaluation of Haptic Cueing for Teleoperation of Multiple Mobile Robots}, journal = {IEEE Transactions on Cybernetics}, year = {2013}, month = {4}, volume = {43}, number = {2}, pages = {597-609}, abstract = {In this paper, we investigate the effect of haptic cueing on a human operator's performance in the field of bilateral teleoperation of multiple mobile robots, particularly multiple unmanned aerial vehicles (UAVs). Two aspects of human performance are deemed important in this area, namely, the maneuverability of mobile robots and the perceptual sensitivity of the remote environment. We introduce metrics that allow us to address these aspects in two psychophysical studies, which are reported here. Three fundamental haptic cue types were evaluated. The Force cue conveys information on the proximity of the commanded trajectory to obstacles in the remote environment. The Velocity cue represents the mismatch between the commanded and actual velocities of the UAVs and can implicitly provide a rich amount of information regarding the actual behavior of the UAVs. Finally, the Velocity+Force cue is a linear combination of the two. Our experimental results show that, while maneuverability is best supported by the Force cue feedback, perceptual sensitivity is best served by the Velocity cue feedback. 
In addition, we show that large gains in the haptic feedbacks do not always guarantee an enhancement in the teleoperator's performance.}, file_url = {fileadmin/user_upload/files/publications/2012/2013a-SonFraChuKimBueRob.pdf}, web_url = {http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6294459}, state = {published}, DOI = {10.1109/TSMCB.2012.2212884}, author = {Son HI{chakurt}{Department Human Perception, Cognition and Action}; Franchi A{antonio}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Kim J{junsukkim}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Robuffo Giordano P{robu_pa}{Department Human Perception, Cognition and Action}} } @Poster{ BiegCBB2013, title = {Asymmetric saccade initiation at smooth pursuit onset}, year = {2013}, month = {1}, web_url = {http://www.fh-ooe.at/kongresswesen/konferenzen-kongresse/2013/23rd-oculomotor-meeting-2013/}, event_name = {23rd Oculomotor Meeting}, event_place = {Linz, Austria}, state = {published}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ Chuang2012, title = {Active Information Retrieval in Scene Perception and Object Learning}, year = {2012}, month = {11}, day = {2}, abstract = {We pick out task-relevant information from the visual scene by moving our eyes and confidently manipulate our near-environment to achieve our goals. A better understanding of human behavior can be achieved by adopting this perspective. That is, humans are active (not passive observers). In my talk, I will address how we characterize natural information-seeking behavior in human participants in two contexts: a) scene processing, b) object learning. The first addresses how unrestrained gaze behavior can be characterized in terms of the information that is available in the scene. Here, I will explain why and how we eschew pure bottom-up procedures of using low-level image statistics to predict gaze movements. Next, I will discuss how we select which views of unfamiliar objects to learn, when we are free to manipulate them in 3D.}, event_name = {Department of Cognitive Neuroscience: Duke-NUS Graduate Medical School}, event_place = {Singapore}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ NieuwenhuizenCB2012, title = {myCopter: Enabling Technologies for Personal Aerial Transportation Systems: A progress report}, year = {2012}, month = {11}, abstract = {The volume of both road and air transportation continues to increase despite many concerns regarding its financial and environmental impact. The European Union ‘Out of the Box’ study suggests a personal aerial transportation system (PATS) as an alternative means of transport for daily commuting. The aim of the myCopter project is to determine the social and technical aspects needed to set up such a transportation system based on personal aerial vehicles (PAVs). The project focuses on three research areas: the human-machine interface and training, automation technologies, and social acceptance. In the first phase of the project, requirements were defined for automation technologies in terms of sensors and test platforms.
Additionally, desirable features for PAVs were investigated to support the design and evaluation of technologies for an effective human-machine interface. Furthermore, an overview of the social-technological environment provided insight into the challenges and issues that surround the realisation of a PATS and its integration into the current transportation system in Europe. The presentation will elaborate on the second phase of the myCopter project, in which initial designs for a human-machine interface and training are developed. These are evaluated experimentally with a focus on aiding non-expert pilots in closed-loop control scenarios. Additionally, first evaluations of novel automation technologies are performed in simulated environments and evaluations on flying test platforms. At the same time, technological issues are evaluated that contribute towards a reflexive design of PAV technologies based on criteria that are acceptable to the general public. The presentation will also focus on the next stages of the project, in which further experimental evaluations will be performed on technologies for human-machine interfaces, and where developed automation technologies will be fully tested on unmanned flying vehicles. The expectations and perspectives of potential PAV users will be evaluated in group interviews in different European countries. Interesting technological and regulatory challenges need to be resolved for the development of a transportation system based on PAVs. The myCopter consortium combines the expertise from several research fields to tackle these challenges and to develop the technological and social aspects of a personal aerial transportation system.}, file_url = {fileadmin/user_upload/files/publications/2012/HELIWorld-2012-Nieuwenhuizen.pdf}, event_name = {4th International HELI World Conference at the International Aerospace Supply Fair AIRTEC 2012}, event_place = {Frankfurt a.M., Germany}, state = {published}, author = {Nieuwenhuizen F{fmnieuwenhuizen}{Department Human Perception, Cognition and Action}; Chuang L{chuang}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ BiegBBC2012, title = {Looking for Discriminating Is Different from Looking for Looking's Sake}, journal = {PLoS ONE}, year = {2012}, month = {9}, volume = {7}, number = {9}, pages = {1-9}, abstract = {Recent studies provide evidence for task-specific influences on saccadic eye movements. For instance, saccades exhibit higher peak velocity when the task requires coordinating eye and hand movements. The current study shows that the need to process task-relevant visual information at the saccade endpoint can be, in itself, sufficient to cause such effects. In this study, participants performed a visual discrimination task which required a saccade for successful completion. We compared the characteristics of these task-related saccades to those of classical target-elicited saccades, which required participants to fixate a visual target without performing a discrimination task. The results show that task-related saccades are faster and are initiated earlier than target-elicited saccades.
Differences between both saccade types are also noted in their saccade reaction time distributions and their main sequences, i.e., the relationship between saccade velocity, duration, and amplitude.}, web_url = {http://www.plosone.org/article/fetchObjectAttachment.action;jsessionid=409E420397B230BE376365245B458D2A?uri=info%3Adoi%2F10.1371%2Fjournal.pone.0045445&representation=PDF}, state = {published}, DOI = {10.1371/journal.pone.0045445}, EPUB = {e45445}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ BiegBBC2012_2, title = {Asymmetries in saccadic latencies during interrupted ocular pursuit}, journal = {Perception}, year = {2012}, month = {9}, volume = {41}, number = {ECVP Abstract Supplement}, pages = {137}, abstract = {Smooth pursuit eye movements can be interrupted and resumed at a later stage, eg, when a concurrent task requires visual sampling from elsewhere. Here we address whether and how interruptive saccades are affected by pursuit movements. Our participants pursued an object which moved horizontally in a sinusoidal pattern (frequency: 0.25 Hz, amplitude: 4 deg. visual angle). During this, discrimination targets appeared at 10 deg. eccentricity, to the left or right of the center. They were timed so that they appeared for 1 second while the pursuit object moved either toward or away from the discrimination target's position. Saccade reaction times were earlier when the discrimination targets appeared in a position that the tracking object was moving towards. Interestingly, saccade RTs back to the pursuit object were shorter when the object moved away from the discrimination target. We conclude that interruptions of pursuit movements lead to asymmetries in saccade generation. These asymmetries could have been caused by biases in attention along the predicted pursuit path.}, web_url = {http://pec.sagepub.com/content/41/1_suppl.toc}, event_name = {35th European Conference on Visual Perception}, event_place = {Alghero, Italy}, state = {published}, DOI = {10.1177/03010066120410S101}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ ChuangNB2012, title = {Eye-movement planning during flight maneuvers}, journal = {Perception}, year = {2012}, month = {9}, volume = {41}, number = {ECVP Abstract Supplement}, pages = {99}, abstract = {How are eye-movements planned to access relevant visual information during flight control? From the cockpit perspective, there are two classes of visual information that are relevant for flight control. First, the changing visuals of the external world provide direct perceptual feedback on how the pilot's command of the control stick is affecting the aircraft's current position, orientation and velocity. Second, flight instruments provide abstracted and specific values—on factors such as the aircraft's compass bearing and vertical speed—that have to be continuously monitored, in order for the global objective of certain maneuvers (eg, turns) to be achieved. 
Trained pilots have to coordinate their eye-movements across this structured visual workspace (ie, outside view and instruments) to access timely and task-relevant information. The current work focuses on providing descriptions of these planned eye-movements. Eye-movements were recorded of pilots in a high-fidelity flight simulator (100° field-of-view) whilst they performed specific flight maneuvers. Fixation durations and transitions between the individual instruments and aspects of the external environment are represented as network graphs. This allowed us to formally describe the sources of information that were relied on across the different tasks and to compare actual performance to expert predictions.}, web_url = {http://pec.sagepub.com/content/41/1_suppl.toc}, event_name = {35th European Conference on Visual Perception}, event_place = {Alghero, Italy}, state = {published}, DOI = {10.1177/03010066120410S101}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; Nieuwenhuizen F{fmnieuwenhuizen}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ ChuangNB2012_2, title = {Investigating Gaze Behavior of Novice Pilots during Basic Flight Maneuvers}, year = {2012}, month = {9}, web_url = {http://research.fit.edu/hci-aero/hci-aero2012/Poster_Sessions.html}, event_name = {International Conference on Human-Computer Interaction in Aerospace (HCI-Aero 2012)}, event_place = {Bruxelles, Belgium}, state = {published}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Nieuwenhuizen FM{fmnieuwenhuizen}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ BiegCFRB2012, title = {Einfluss von Ablenkung und Augenbewegungen auf Steuerungsaufgaben}, year = {2012}, month = {8}, pages = {341-344}, abstract = {In the present study, the influence of visual distraction on steering tasks was investigated. The results indicate that even a brief shift of attention and gaze is accompanied by a systematic effect on the steering task. In turn, the concurrently performed steering task also systematically affects eye movements. Taking such interference into account can be useful in the development of graphical on-board information systems for road vehicles or aircraft.}, web_url = {http://dl.mensch-und-computer.de/handle/123456789/2907}, editor = {Reiterer, H. , O. Deussen}, publisher = {Oldenbourg}, address = {München, Germany}, booktitle = {Mensch & Computer 2012: 12. fachübergreifende Konferenz für interaktive und kooperative Medien}, event_name = {Mensch & Computer (M&C)}, event_place = {Konstanz, Germany}, state = {published}, ISBN = {978-3-486-71879-9}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ Bieg2012, title = {Saccadic and pursuit eye movements during coordinated behaviors}, year = {2012}, month = {6}, day = {19}, abstract = {Gaze control is largely determined by an observer's behavioral goals. In this respect, eye movements are one of many coordinated actions to achieve these goals.
In this presentation I will discuss experiments which studied the properties of saccadic and pursuit eye movements in concerted behaviors, for example in steering a vehicle. In this respect, I will consider an aspect of gaze control which has largely been neglected by previous research: the consequence of eye movements for visual perception and the associated behavioral consequence in terms of the value of the obtained additional information}, web_url = {http://webu2.upmf-grenoble.fr/LPNC/seminaire_2012-06-19_13h-14h}, event_name = {Laboratoire de Psychologie et Neurocognition (LPNC), Université Pierre Mendès-France}, event_place = {Grenoble, France}, state = {published}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}} } @Article{ ChuangVB2012_2, title = {Learned non-rigid object motion is a view-invariant cue to recognizing novel objects}, journal = {Frontiers in Computational Neuroscience}, year = {2012}, month = {5}, volume = {6}, number = {26}, pages = {1-8}, abstract = {There is evidence that observers use learned object motion to recognize objects. For instance, studies have shown that reversing the learned direction in which a rigid object rotated in depth impaired recognition accuracy. This motion reversal can be achieved by playing animation sequences of moving objects in reverse frame order. In the current study, we used this sequence-reversal manipulation to investigate whether observers encode the motion of dynamic objects in visual memory, and whether such dynamic representations are encoded in a way that is dependent on the viewing conditions. Participants first learned dynamic novel objects, presented as animation sequences. Following learning, they were then tested on their ability to recognize these learned objects when their animation sequence was shown in the same sequence order as during learning or in the reverse sequence order. In Experiment 1, we found that non-rigid motion contributed to recognition performance; that is, sequence-reversal decreased sensitivity across different tasks. In subsequent experiments, we tested the recognition of non-rigidly deforming (Experiment 2) and rigidly rotating (Experiment 3) objects across novel viewpoints. Recognition performance was affected by viewpoint changes for both experiments. Learned non-rigid motion continued to contribute to recognition performance and this benefit was the same across all viewpoint changes. By comparison, learned rigid motion did not contribute to recognition performance. These results suggest that non-rigid motion provides a source of information for recognizing dynamic objects, which is not affected by changes to viewpoint.}, web_url = {http://www.frontiersin.org/Journal/DownloadFile.ashx?pdf=1&FileId=%2062343&articleId=%2022441&Version=%201&ContentTypeId=21&FileName=%20fncom-06-00026.pdf}, state = {published}, DOI = {10.3389/fncom.2012.00026}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ BrowatzkiTMBW2012, title = {Active Object Recognition on a Humanoid Robot}, year = {2012}, month = {5}, pages = {2021-2028}, abstract = {Interaction with its environment is a key requisite for a humanoid robot. Especially the ability to recognize and manipulate unknown objects is crucial to successfully work in natural environments. 
Visual object recognition, however, still remains a challenging problem, as three-dimensional objects often give rise to ambiguous, two-dimensional views. Here, we propose a perception-driven, multisensory exploration and recognition scheme to actively resolve ambiguities that emerge at certain viewpoints. We define an efficient method to acquire two-dimensional views in an object-centered task space and sample characteristic views on a view sphere. Information is accumulated during the recognition process and used to select actions expected to be most beneficial in discriminating similar objects. Besides visual information we take into account proprioceptive information to create more reliable hypotheses. Simulation and real-world results clearly demonstrate the efficiency of active, multisensory exploration over passive, vision-only recognition methods.}, file_url = {fileadmin/user_upload/files/publications/2012/ICRA-2012-Browatzki.pdf}, web_url = {http://www.icra2012.org/}, publisher = {IEEE}, address = {Piscataway, NJ, USA}, event_name = {IEEE International Conference on Robotics and Automation (ICRA 2012)}, event_place = {St. Paul, MN, USA}, state = {published}, DOI = {10.1109/ICRA.2012.6225218}, author = {Browatzki B{browatbn}{Department Human Perception, Cognition and Action}; Tikhanoff V; Metta G; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}} } @Conference{ Chuang2011_4, title = {Perception of the active observer}, year = {2011}, month = {11}, day = {2}, abstract = {As active observers, we move our eyes, re-orient our bodies and even manipulate our environment to access task-relevant information. The purpose of this talk is to demonstrate that our understanding of human behavior can be enriched by considering that the observer is often-times responsible for his own perceptual input. I will do so by first presenting research that: a) addressed how object speeds are estimated during locomotion, b) investigated how we explore objects during learning for subsequent recognition. Following this, I will present research in two application scenarios that exemplifies the role of the active observer — namely, teleoperation of swarm-UAVs and gaze-tracking on wall-sized displays.}, web_url = {http://ikw.uni-osnabrueck.de/en/node/680}, event_name = {Institute of Cognitive Science, Universität Osnabrück}, event_place = {Osnabrück, Germany}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ BrowatzkiFGBW2011, title = {Going into depth: Evaluating 2D and 3D cues for object classification on a new, large-scale object dataset}, year = {2011}, month = {11}, pages = {1189-1195}, abstract = {Categorization of objects solely based on shape and appearance is still a largely unresolved issue. With the advent of new sensor technologies, such as consumer-level range sensors, new possibilities for shape processing have become available for a range of new application domains. In the first part of this paper, we introduce a novel, large dataset containing 18 categories of objects found in typical household and office environments-we envision this dataset to be useful in many applications ranging from robotics to computer vision. The second part of the paper presents computational experiments on object categorization with classifiers exploiting both two-dimensional and three-dimensional information.
We evaluate categorization performance for both modalities in separate and combined representations and demonstrate the advantages of using range data for object and shape processing skills.}, web_url = {http://www.vision.ee.ethz.ch/CDC4CV/index.html}, publisher = {IEEE}, address = {Piscataway, NJ, USA}, booktitle = {2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)}, event_name = {1st ICCV Workshop on Consumer Depth Cameras in Computer Vision (CD4CV2011)}, event_place = {Barcelona, Spain}, state = {published}, ISBN = {978-1-467-30062-9}, DOI = {10.1109/ICCVW.2011.6130385}, author = {Browatzki B{browatbn}{Department Human Perception, Cognition and Action}; Fischer J; Graf B; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}} } @Inproceedings{ SonCKB2011, title = {Haptic Feedback Cues Can Improve Human Perceptual Awareness in Multi-Robots Teleoperation}, year = {2011}, month = {10}, pages = {1323-1328}, abstract = {The availability of additional force cues in haptic devices are often expected to improve control performance, over conditions that only provide visual feedback. However, there is little empirical evidence to show this to be true for the teleoperation control of remote vehicles (i.e., multiple unmanned aerial vehicles (UAVs)). In this paper, we show that force cues can increase one's sensitivity in discerning the presence of obstacles in the remote multi-UAVs' environment. Significant benefits, relative to a purely visual scenario, were achieved only when force cues were sufficiently amplified by large gains. In addition, force cues tended to provide stronger benefits when they were based on the UAVs' velocity information.}, web_url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6106130}, publisher = {IEEE}, address = {Piscataway, NJ, USA}, event_name = {11th International Conference on Control, Automations and Systems (ICCAS 2011)}, event_place = {Gyeonggi-do, Korea}, state = {published}, ISBN = {978-1-4577-0835-0}, author = {Son HI{chakurt}{Department Human Perception, Cognition and Action}; Chuang L{chuang}{Department Human Perception, Cognition and Action}; Kim J{junsukkim}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ LeeBBC2011, title = {Fast Fitting on a Saccadic Eye Movement Model for Decision Making}, year = {2011}, month = {10}, volume = {12}, pages = {33}, abstract = {How does our visual system decide where to look? The Linear Approach to Threshold with Ergodic Rate (LATER: Carpenter, 1995) is a simple decision-making model for saccadic eye movements. Currently, experimental data suggest that saccadic eye-movements can be discriminated according to whether they are performed for directed fixations or for item recognition (Montagnini & Chelazzi, 2005; Bieg et al., submitted). Unfortunately, sufficient goodness-of-fit can only be acquired with large datasets, for each individual participant. Here, we investigate whether adapting LATER with modern computational methods can allow for saccades to be classified for their functionality, with minimal data and in real-time. 
In doing so, we strive towards the eventual goal of using the LATER model for predicting observer intentions in real-world applications.}, event_name = {12th Conference of Junior Neuroscientists of Tübingen (NeNA 2011)}, event_place = {Heiligkreuztal, Germany}, state = {published}, author = {Lee JJ{jlee}{Department Human Perception, Cognition and Action}; Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ Chuang2011_3, title = {Moving objects: From object speed estimation to object exploration}, year = {2011}, month = {10}, web_url = {http://www.liv.ac.uk/psychology/}, event_name = {Department of Psychology, University of Liverpool}, event_place = {Liverpool, UK}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ Chuang2011_2, title = {The active observer: Implications for science and engineering}, year = {2011}, month = {10}, web_url = {http://www.tno.nl/content.cfm?context=thema&content=markt_product&laag1=892&laag2=184&laag3=401&item_id=1581&Taal=1}, event_name = {TNO Human Factors}, event_place = {Soesterberg, Netherlands}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Article{ BulthoffC2011, title = {Seeing: The Computational Approach to Biological Vision. Second Edition. By John P. Frisby and James V. Stone. Cambridge (Massachusetts): MIT Press}, journal = {Quarterly Review of Biology}, year = {2011}, month = {9}, volume = {86}, number = {3}, pages = {227}, web_url = {http://www.journals.uchicago.edu/doi/10.1086/661174}, state = {published}, DOI = {10.1086/661174}, author = {B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ EngelBHC2011, title = {Image Retrieval with Semantic Sketches}, year = {2011}, month = {9}, pages = {412-425}, abstract = {With increasingly large image databases, searching in them becomes an ever more difficult endeavor. Consequently, there is a need for advanced tools for image retrieval in a webscale context. Searching by tags becomes intractable in such scenarios as large numbers of images will correspond to queries such as “car and house and street”. We present a novel approach that allows a user to search for images based on semantic sketches that describe the desired composition of the image. Our system operates on images with labels for a few high-level object categories, allowing us to search very fast with a minimal memory footprint. We employ a structure similar to random decision forests which avails a data-driven partitioning of the image space providing a search in logarithmic time with respect to the number of images. This makes our system applicable for large scale image search problems. We performed a user study that demonstrates the validity and usability of our approach.}, web_url = {http://interact2011.org/}, editor = {Campos, P. , N. Graham, J. Jorge, N. Nunes, P. Palanque, M. 
Winckler}, publisher = {Springer}, address = {Berlin, Germany}, series = {Lecture Notes in Computer Science ; 6946}, booktitle = {Human-Computer Interaction: INTERACT 2011}, event_name = {13th IFIP TC13 Conference on Human-Computer Interaction}, event_place = {Lisboa, Portugal}, state = {published}, ISBN = {978-3-642-23774-4}, DOI = {10.1007/978-3-642-23774-4_35}, author = {Engel D{engel}{Department Human Perception, Cognition and Action}; Herdtweck C{grueschaan}{Department Human Perception, Cognition and Action}; Browatzki B{browatbn}{Department Human Perception, Cognition and Action}; Curio C{curio}{Department Human Perception, Cognition and Action}} } @Inproceedings{ SonCFKLLBR2011, title = {Measuring an Operator's Maneuverability Performance in the Haptic Teleoperation of Multiple Robots}, year = {2011}, month = {9}, pages = {3039-3046}, abstract = {In this paper, we investigate the maneuverability performance of human teleoperators on multi-robots. First, we propose that maneuverability performance can be assessed by a frequency response function that jointly considers the input force of the operator and the position errors of the multi-robot system that is being maneuvered. Doing so allows us to evaluate maneuverability performance in terms of the human teleoperator's interaction with the controlled system. This allowed us to effectively determine the suitability of different haptic cue algorithms in improving teleoperation maneuverability. Performance metrics based on the human teleoperator's frequency response function indicate that maneuverability performance is best supported by a haptic feedback algorithm which is based on an obstacle avoidance force.}, file_url = {fileadmin/user_upload/files/publications/2011/IROS-2011-Son.pdf}, web_url = {http://www.iros2011.org/}, editor = {Amato, N.M.}, publisher = {IEEE}, address = {Piscataway, NJ, USA}, event_name = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS 2011)}, event_place = {San Francisco, CA, USA}, state = {published}, ISBN = {978-1-61284-454-1}, DOI = {10.1109/IROS.2011.6048185}, author = {Son HI{chakurt}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Franchi A{antonio}{Department Human Perception, Cognition and Action}; Kim J{junsukkim}{Department Human Perception, Cognition and Action}; Lee D; Lee S-W; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Robuffo Giordano P{robu_pa}{Department Human Perception, Cognition and Action}} } @Poster{ ChuangS2011, title = {Object speed estimation during walking does not add up}, year = {2011}, month = {9}, web_url = {http://www.bccn-tuebingen.de/news/article/symposium-imultisensory-perception-and-actioni-96.html}, event_name = {Bernstein Cluster D Symposium: Multisensory Perception and Action}, event_place = {Tübingen, Germany}, state = {published}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Souman JL{souman}{Department Human Perception, Cognition and Action}} } @Poster{ ChuangBS2011, title = {The center-surround effect in visual speed estimation during walking}, journal = {Perception}, year = {2011}, month = {9}, volume = {40}, number = {ECVP Abstract Supplement}, pages = {129}, abstract = {Walking reduces visual speed estimates of optic flow (Souman et al, 2010 Journal of Vision 10(11):14).
Simultaneously, visual background motion can influence the perceived speed of moving objects (Tynan and Sekuler, 1975 Vision Research 25 1231–1238; Baker and Graf, 2010 Vision Research 50 193–201). These two effects have been attributed to different subtractive processes, which may help in segregating object motion from self-motion induced optic flow. Here, we investigate how both factors jointly contribute to the perceived visual speed of objects. Participants compared the speed of two central Gabor patches on a ground plane, presented in consecutive intervals, either while standing still or while walking on a treadmill. In half the trials, one of the Gabors was surrounded by a moving random dot pattern, the speed of which matched walking speed. Our results replicated previous findings. A moving surround as well as walking can independently induce a subtractive effect on the perceived speed of the moving center, with the effect size increasing with center speed. However, walking does not affect visual speed estimates of the center when a visual surround is present. These results suggest that the visual input dominates the segregation of object motion from background optic flow.}, web_url = {http://pec.sagepub.com/content/40/1_suppl.toc}, event_name = {34th European Conference on Visual Perception}, event_place = {Toulouse, France}, state = {published}, DOI = {10.1177/03010066110400S102}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Souman J{souman}{Department Human Perception, Cognition and Action}} } @Conference{ Chuang2011, title = {How do we seek out information?}, year = {2011}, month = {8}, day = {29}, abstract = {Many tasks require us to access relevant information from a dynamic visual input. To do so, we move our eyes and bodies as well as manipulate our environments. Unfortunately, experiments on human behavior tend to ignore this fact, often to the detriment of their ecological validity. Our understanding can be better informed by studying how humans actively seek out relevant information in their unrestrained and task-relevant workspaces. I will present several research studies from our lab to demonstrate this point. These studies relate to how humans explore novel objects, unrestrained gaze measurements on wall-sized displays, and the influence of haptic force feedback on the teleoperation of micro unmanned aerial vehicles. Finally, I will introduce our latest research project that targets the implications of a personal air transport system (www.mycopter.eu).}, file_url = {fileadmin/user_upload/files/publications/2011/D-CIS-Lab-2011-Chuang.pdf}, web_url = {http://www.d-cis.nl/news/210-colloquium-how-do-we-seek-out-information-}, event_name = {D-CIS Lab Colloquium}, event_place = {Delft, Netherlands}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ SonKCFRLB2011, title = {An Evaluation of Haptic Cues on the Tele-Operator's Perceptual Awareness of Multiple UAVs' Environments}, year = {2011}, month = {6}, pages = {149-154}, abstract = {The use of multiple unmanned aerial vehicles (UAVs) is increasingly being incorporated into a wide range of teleoperation applications. To date, relevant research has largely been focused on the development of appropriate control schemes.
In this paper, we extend previous research by investigating how control performance could be improved by providing the teleoperator with haptic feedback cues. First, we describe a control scheme that allows a teleoperator to manipulate the flight of multiple UAVs in a remote environment. Next, we present three designs of haptic cue feedback that could increase the teleoperator's environmental awareness of such a remote environment. These cues are based on the UAVs' i) velocity information, ii) proximity to obstacles, and iii) a combination of these two sources of information. Finally, we present an experimental evaluation of these haptic cue designs. Our evaluation is based on the teleoperator's perceptual sensitivity to the physical environment inhabited by the multiple UAVs. We conclude that a teleoperator's perceptual sensitivity is best served by haptic feedback cues that are based on the velocity information of multiple UAVs.}, file_url = {fileadmin/user_upload/files/publications/2011/WHC-2011-Son.pdf}, web_url = {http://www.haptics2011.org/en/}, editor = {Jones, L. , M. Harders, Y. Yokokohji}, publisher = {IEEE}, address = {Piscataway, NJ, USA}, event_name = {IEEE 2011 World Haptics Conference (WHC 2011)}, event_place = {Istanbul, Turkey}, state = {published}, ISBN = {978-1-4577-0299-0}, DOI = {10.1109/WHC.2011.5945477}, author = {Son HI{chakurt}{Department Human Perception, Cognition and Action}; Kim J{junsukkim}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Franchi A{antonio}{Department Human Perception, Cognition and Action}; Robuffo Giordano P{robu_pa}{Department Human Perception, Cognition and Action}; Lee D; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ WallravenC2011, title = {Non-accidental properties determine object exploration patterns}, year = {2011}, month = {5}, pages = {1-2}, file_url = {fileadmin/user_upload/files/publications/2011/ICCNS-2011-Wallraven.pdf}, web_url = {http://cns.bu.edu/cns-meeting/2011conference.html}, event_name = {15th International Conferece on Cognitive and Neural Systems (ICCNS 2011)}, event_place = {Boston, MA, USA}, state = {published}, author = {Wallraven C{walli}{Department Human Perception, Cognition and Action}; Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Thesis{ Chuang2011_5, title = {Recognizing Objects From Dynamic Visual Experiences}, year = {2011}, state = {published}, type = {PhD}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ 7086, title = {Learning and Recognizing 3D Objects by Combination of Visual and Proprioceptive Information}, year = {2010}, month = {10}, volume = {11}, number = {9}, pages = {29}, abstract = {One major difficulty in computational object recognition lies in the fact that a 3D object can be seen from an infinite number of viewpoints. Thus, the issue arises that objects with different 3D shapes often share similar 2D views. Humans are able to resolve this kind of ambiguity by producing additional views through object manipulation or self movement. In both cases the action made provides proprioceptive information linking the visual information retrieved from the obtained views. Following this process, we combine visual and proprioceptive information to increase recognition performance of a computer vision system. In our approach we place a 3D model of an unknown object in the hand of a simulated anthropomorphic robot arm. 
The robot now executes a predefined exploratory movement to acquire a variety of different object views. To assure computational tractability, a subset of representative views is selected using the Keyframe concept by Wallraven et al. (2007). Each remaining frame is then annotated with the respective proprioceptive configuration of the robot arm and the transitions between these configurations are treated as links between object views. For recognizing objects this representation can be used to control the robot arm based on learned data. If both proprioceptive and visual data agree on a candidate, the object was recognized successfully. We investigated recognition performance using this method. The results show that the number of misclassified results decreases significantly as both sources, visual and proprioceptive, are available, thus demonstrating the importance of a combined space of visual and proprioceptive information.}, event_name = {11th Conference of Junior Neuroscientists of Tübingen (NeNa 2010)}, event_place = {Heiligkreuztal, Germany}, state = {published}, author = {Browatzki B{browatbn}{Department Human Perception, Cognition and Action}} } @Conference{ 7079, title = {Perceptual decisions speed up reflexive saccades}, year = {2010}, month = {10}, volume = {11}, pages = {6}, abstract = {Reflexive saccades are fast eye movements that follow the sudden appearance of a salient visual stimulus in the visual field. This reflexive orienting mechanism could have evolved to enable quick evaluations of sudden changes in the environment and, in doing so, support potentially vital actions (e.g., flight). In light of this, it is surprising that reflexive saccades have mostly been studied with tasks that do not require the saccade to support a perceptual judgment. In the current study we measured properties of reflexive saccades in two conditions: In one condition, the saccade enabled the performance of an object discrimination task (discrimination), in the other, it did not (fixation). In the discrimination task, participants made reflexive saccades following the sudden onsets of Landolt squares (0.1 deg. visual angle) and decided if these squares had an opening at the top or bottom. In the fixation task, the same squares were presented but without an opening. Here participants were instructed to fixate the squares as quickly as possible. The results show that saccades supporting a discrimination task are faster and are initiated earlier than saccades that do not enable the completion of such a task. This demonstrates that reflexive saccades could be influenced by the demands of the task. Possible task-specific factors could include the difficulty of the task, time pressure, or the reward associated with completion of the task.}, event_name = {11th Conference of Junior Neuroscientists of Tübingen (NeNa 2010)}, event_place = {Heiligkreuztal, Germany}, state = {published}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 6656, title = {Towards Artificial Systems: What Can We Learn from Human Perception?}, year = {2010}, month = {9}, pages = {1-3}, abstract = {Research in learning algorithms and sensor hardware has led to rapid advances in artificial systems over the past decade. However, their performance continues to fall short of the efficiency and versatility of human behavior.
In many ways, a deeper understanding of how human perceptual systems process and act upon physical sensory information can contribute to the development of better artificial systems. In the presented research, we highlight how the latest tools in computer vision, computer graphics, and virtual reality technology can be used to systematically understand the factors that determine how humans perform in realistic scenarios of complex task-solving.}, file_url = {/fileadmin/user_upload/files/publications/PRICAI-62300001%20(1)_6656[0].pdf}, web_url = {http://www.pricai2010.org/default.asp}, editor = {Zhang, B.-T. , M. A. Orgun}, publisher = {Springer}, address = {Berlin, Germany}, series = {Lecture Notes in Computer Science ; 6230}, booktitle = {PRICAI 2010: Trends in Artificial Intelligence}, event_name = {11th Pacific Rim International Conference on Artificial Intelligence}, event_place = {Daegu, South Korea}, state = {published}, ISBN = {978-3-642-15246-7}, DOI = {10.1007/978-3-642-15246-7_1}, author = {B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ ChuangBBF2010, title = {Measuring unrestrained gaze on wall-sized displays}, year = {2010}, month = {8}, pages = {347-348}, abstract = {Motivation -- Natural gaze involves the coordinated movements of eye, head and torso. This allows access to a wide field of view, up to a range of 260° (Chen, Solinger, Poncet & Lancet, 1999). The recent increase in large displays places a demand on being able to track a mobile user's gaze over this extensive range. Research approach -- We developed an extensible system for measuring the gaze of users on wall-sized displays. Our solution combines the inputs of a conventional head-mounted eyetracker (Eyelink2©, SR Research) and motion-capture system (Vicon MX©, Vicon), to provide real-time measurements of a mobile user's gaze in 3D space. Findings/Design -- The presented system serves as a single platform for studying user behavior across a wide range of tasks: single-step saccade shifts, free-viewing of natural scenes, visual search and gaze-assisted user interfaces. Importantly, it allows eye- and head-movements to be separately measured without compromising the accuracy of combined gaze measurements. Take away message -- Unrestrained gaze movements on a large display can be accurately measured by suitably combining the inputs of conventional eye- and body-tracking hardware.}, web_url = {http://ecce2010.tudelft.nl/}, editor = {Neerincx, W. , W-P Brinkman}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {28th Annual European Conference on Cognitive Ergonomics (ECCE '10)}, event_place = {Delft, Netherlands}, state = {published}, ISBN = {978-1-60558-946-6}, DOI = {10.1145/1962300.1962379}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Fleming RW{roland}{Department Human Perception, Cognition and Action}} } @Poster{ 6606, title = {Does adding a visual task component affect fixation accuracy?}, journal = {Perception}, year = {2010}, month = {8}, volume = {39}, number = {ECVP Abstract Supplement}, pages = {35}, abstract = {Video-based eye-trackers are typically calibrated by instructing participants to fixate a series of dots, the physical locations of which are known to the system. 
Unfortunately, this procedure does not verify if fixation has actually occurred at the desired locations. This limitation can be remedied by requiring participants to perform a simple visual discrimination task at each location, thus mandating accurate fixation. Still, it remains an open question whether this modification could affect fixation accuracy. In the current study, we compared the accuracy of fixations that were performed with a visual discrimination task and those without such a requirement. Participants either identified the orientation of a small Landolt C (size = 0.1°) or fixated a similar probe without performing the task. Results indicate that participants fixated equally well in both tasks (mean diff. of abs. error = 0.01°, Bayes factor B01 = 4.0 with JZS prior, see [Rouder et al., 2009, Psychonomic Bulletin & Review, 16(2), 225-237]). Given this, we propose the implementation of this visual discrimination task to eye-tracking calibration protocols as it elicits verifiable fixations without compromising fixation accuracy.}, web_url = {http://pec.sagepub.com/content/39/1_suppl.toc}, event_name = {33rd European Conference on Visual Perception}, event_place = {Lausanne, Switzerland}, state = {published}, DOI = {10.1177/03010066100390S101}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 6246, title = {Eye and Pointer Coordination in Search and Selection Tasks}, year = {2010}, month = {3}, pages = {89-92}, abstract = {Selecting a graphical item by pointing with a computer mouse is a ubiquitous task in many graphical user interfaces. Several techniques have been suggested to facilitate this task, for instance, by reducing the required movement distance. Here we measure the natural coordination of eye and mouse pointer control across several search and selection tasks. We find that users automatically minimize the distance to likely targets in an intelligent, task dependent way. When target location is highly predictable, top-down knowledge can enable users to initiate pointer movements prior to target fixation. These findings question the utility of existing assistive pointing techniques and suggest that alternative approaches might be more effective.}, file_url = {/fileadmin/user_upload/files/publications/ETRA2010-Bieg_6246[0].pdf}, web_url = {http://etra.cs.uta.fi/}, editor = {Morimoto, C. H., H. Istance, A. Hyrskykari, Q. Ji}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {Symposium on Eye Tracking Research and Applications (ETRA 2010)}, event_place = {Austin, TX, USA}, state = {published}, ISBN = {978-1-60558-994-7}, DOI = {10.1145/1743666.1743688}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Fleming RW{roland}{Department Human Perception, Cognition and Action}; Reiterer H; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ Bieg2009, title = {Influences of task complexity and individual differences on the performance of gaze-assisted human-machine interfaces}, year = {2009}, month = {11}, volume = {10}, number = {2}, pages = {21}, abstract = {Human-machine interfaces can be enhanced by incorporating knowledge of the user’s current point of regard.
For example, Zhai and colleagues (1999) showed that faster task completion times could be achieved on a simple pointing task if the display pointer was translocated according to the user’s gaze. This manipulation removes the need to manually move the pointer and hence promises time-savings that grow in proportion to display size. Here, we report the findings of applying the same technique on a wall-sized display (2.2 m × 1.8 m), across more complex pointing tasks. Two main components comprised the four tasks that participants were required to perform, with and without gaze-assisted pointing: namely, conjunctive search of colored shapes and click-and-drag of items to a circumscribed region. Contrary to previous findings, we found that gaze-assisted pointer placement significantly increased task completion times, relative to manual pointer placement. Detailed analyses revealed that task complexity and individual differences in gaze behaviour and eye-hand coordination had an adverse effect on task performance, which emphasizes the importance of considering these factors in future implementations of gaze-assisted interfaces.}, web_url = {http://www.neuroschool-tuebingen-nena.de/}, event_name = {10th Conference of Junior Neuroscientists of Tübingen (NeNa 2009)}, event_place = {Ellwangen, Germany}, state = {published}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 6101, title = {Gaze-Assisted Pointing for Wall-Sized Displays}, year = {2009}, month = {8}, pages = {9-12}, abstract = {Previous studies have argued for the use of gaze-assisted pointing techniques (MAGIC) in improving human-computer interaction. Here, we present experimental findings that were drawn from human performance of two tasks on a wall-sized display. Our results show that a crude adoption of MAGIC across a range of complex tasks does not increase pointing performance. More importantly, a detailed analysis of user behavior revealed several issues that were previously ignored (such as interference of corrective saccades, increased decision time due to variability of precision, errors due to eye-hand asynchrony, and interference with search behavior) which should influence the development of gaze-assisted technology.}, web_url = {http://www.interact2009.org/}, editor = {Gross, T. , J. Gulliksen, P. Kotze, L. Oestreicher, P. Palanque, R. Oliveira Prates, M. Winckler}, publisher = {Springer}, address = {Berlin, Germany}, series = {Lecture Notes in Computer Science ; 5727}, booktitle = {Human-Computer Interaction - INTERACT 2009}, event_name = {12th IFIP TC13 International Conference on Human-Computer Interaction}, event_place = {Uppsala, Sweden}, state = {published}, ISBN = {978-3-642-03658-3}, DOI = {10.1007/978-3-642-03658-3_3}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Reiterer H} } @Poster{ 5867, title = {Head mobility influences gaze behavior across natural viewing tasks}, journal = {Perception}, year = {2009}, month = {8}, volume = {38}, number = {ECVP Abstract Supplement}, pages = {166}, abstract = {Natural gaze behavior is often studied under conditions that restrain head movements. Here, we report how the availability of head movement can influence gaze behavior on wall-sized images of natural outdoor scenes (field-of-view: ~90°).
Participants performed half of the experiment with complete head mobility and the remaining trials with their heads restrained in a chin-rest. They were required to either rate the images for attractiveness (i.e., free-viewing) or to count the visible animals (i.e., visual search). On average, more fixations were found on the trials that allowed for head movements (unrestrained: 4.21 fixations/sec; restrained: 3.75 fixations/sec), which were also shorter in their mean duration (unrestrained: 221 ms; restrained: 252 ms). In addition, unrestrained gaze contained a larger proportion of small amplitude saccades (i.e., less than 5°) than head-restrained gaze. Finally, our participants demonstrated a general preference for fixating regions that were close to the central eye-in-head orientation. Altogether, these findings suggest that the availability of head movements allowed our participants to re-orient to regions of interest and sample these regions more frequently. This sampling benefit applied to both visual search and free viewing tasks. The current findings emphasize the importance of allowing head mobility when studying natural gaze behavior.}, web_url = {http://pec.sagepub.com/content/38/1_suppl.toc}, event_name = {32nd European Conference on Visual Perception}, event_place = {Regensburg, Germany}, state = {published}, DOI = {10.1177/03010066090380S101}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Herholz S{sherholz}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Fleming R{roland}{Department Human Perception, Cognition and Action}} } @Poster{ 6082, title = {Influences of task complexity and individual differences on the performance of gaze-assisted human-machine interfaces}, journal = {Perception}, year = {2009}, month = {8}, volume = {38}, number = {ECVP Abstract Supplement}, pages = {172}, abstract = {Human-machine interfaces can be enhanced by incorporating knowledge of the user's current point of regard. For example, Zhai and colleagues (1999) showed that faster task completion times could be achieved on a simple pointing task if the display pointer was translocated according to the user's gaze. This manipulation removes the need to manually move the pointer and hence promises time-savings that grow in proportion to display size. Here, we report the findings of applying the same technique on a wall-sized display (2.2 m × 1.8 m), across more complex pointing tasks. Two main components comprised the four tasks that participants were required to perform, with and without gaze-assisted pointing: namely, conjunctive search of colored shapes and click-and-drag of items to a circumscribed region. Contrary to previous findings, we found that gaze-assisted pointer placement significantly increased task completion times, relative to manual pointer placement.
Detailed analyses revealed that task complexity and individual differences in gaze behaviour and eye-hand coordination had an adverse effect on task performance, which emphasizes the importance of considering these factors in future implementations of gaze-assisted interfaces.}, web_url = {http://pec.sagepub.com/content/38/1_suppl.toc}, event_name = {32nd European Conference on Visual Perception}, event_place = {Regensburg, Germany}, state = {published}, DOI = {10.1177/03010066090380S101}, author = {Bieg H-J{bieg}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Reiterer H} } @Inproceedings{ GerkenBDR2009, title = {Enhancing input device evaluation: longitudinal approaches}, year = {2009}, month = {4}, pages = {4351-4356}, abstract = {In this paper we present our experiences with longitudinal study designs for input device evaluation. In this domain, analyzing learning is currently the main reason for applying longitudinal designs. We will briefly discuss related research questions and outline two case studies in which we used different approaches to address this issue. Finally, we will point out future research tasks in the context of longitudinal evaluation methods.}, web_url = {http://dl.acm.org/citation.cfm?doid=1520340.1520665}, publisher = {ACM Press}, address = {New York, NY, USA}, booktitle = {CHI 2009: Digital Life, New World}, event_name = {27th Annual CHI Conference on Human Factors in Computing Systems}, event_place = {Boston, MA, USA}, state = {published}, ISBN = {978-1-60558-247-4}, DOI = {10.1145/1520340.1520665}, author = {Gerken J; Bieg H-J{bieg}; Dierdorf S; Reiterer H} } @Inproceedings{ Bieg2009_2, title = {Gaze-augmented manual interaction}, year = {2009}, month = {4}, pages = {3121-3124}, abstract = {This project will demonstrate a new approach to employing users' gaze in the context of human-computer interaction. This new approach uses gaze passively in order to improve the speed and precision of manually controlled pointing techniques. Designing such gaze-augmented manual techniques requires an understanding of the principles that govern the coordination of hand and eye. This coordination is influenced by situational parameters (task complexity, input device used, etc.), which this project will explore in controlled experiments.}, web_url = {http://dl.acm.org/citation.cfm?id=1520442}, publisher = {ACM Press}, address = {New York, NY, USA}, booktitle = {CHI 2009: Digital Life, New World}, event_name = {27th Annual CHI Conference on Human Factors in Computing Systems}, event_place = {Boston, MA, USA}, state = {published}, ISBN = {978-1-60558-247-4}, DOI = {10.1145/1520340.1520442}, author = {Bieg H-J{bieg}} } @Inproceedings{ 5470, title = {LibGaze: Real-time gaze-tracking of freely moving observers for wall-sized displays}, year = {2008}, month = {10}, pages = {101-110}, abstract = {We present a mobile system for tracking the gaze of an observer in real-time as they move around freely and interact with a wall-sized display. The system combines a head-mounted eye tracker with a motion capture system for tracking markers attached to the eye tracker. Our open-source software library libGaze provides routines for calibrating the system and computing the viewer’s position and gaze direction in real-time. The modular architecture of our system supports simple replacement of each of the main components with alternative technology.
We use the system to perform a psychophysical user-study, designed to measure how users visually explore large displays. We find that observers use head movements during gaze shifts, even when these are well within the range that can be comfortably reached by eye movements alone. This suggests that free movement is important in normal gaze behaviour, motivating further applications in which the tracked user is free to move.}, file_url = {fileadmin/user_upload/files/publications/VMV-2008-Herholz.pdf}, web_url = {http://www.inf.uni-konstanz.de/vmv/}, editor = {Deussen, O. , D. Keim}, publisher = {IOS Press}, address = {Amsterdam, Netherlands}, event_name = {13th International Fall Workshop on Vision, Modeling, and Visualization (VMV 2008)}, event_place = {Konstanz, Germany}, state = {published}, author = {Herholz S{sherholz}{Department Human Perception, Cognition and Action}; Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Tanner TG{tanner}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Fleming RW{roland}{Department Human Perception, Cognition and Action}} } @Article{ 4686, title = {A dynamic object-processing network: Metric shape discrimination of dynamic objects by activation of occipito-temporal, parietal and frontal cortex}, journal = {Cerebral Cortex}, year = {2008}, month = {6}, volume = {18}, number = {6}, pages = {1302-1313}, abstract = {Shape perception is important for object recognition. However, behavioural studies have shown that rigid motion also contributes directly to the recognition process, in addition to providing visual cues to shape. Using psychophysics and functional brain imaging, we investigated the neural mechanisms involved in shape and motion processing for dynamic object recognition. Observers discriminated between pairs of rotating novel objects in which the three-dimensional shape difference between the pair was systematically varied in metric steps. In addition, the objects rotated in either the same or different direction to determine the effect of task-irrelevant motion on behaviour and neural activity. We found that observers’ shape discrimination performance increased systematically with shape differences, as did the haemodynamic responses of occipito-temporal, parietal and frontal regions. Furthermore, responses in occipital regions were only correlated with observers’ perceived shape differences. We also found different effects of object motion on shape discrimination across observers which were reflected in responses of the superior temporal sulcus. These results suggest a network of regions that are involved in the discrimination of metric shape differences for dynamic object recognition.}, web_url = {http://cercor.oxfordjournals.org/content/18/6/1302.full.pdf+html}, state = {published}, DOI = {10.1093/cercor/bhm162}, author = {Schultz J{johannes}{Department Human Perception, Cognition and Action}; Chuang L{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}} } @Poster{ 5019, title = {Human observers use personal exploration patterns in novel object recognition}, journal = {Perception}, year = {2007}, month = {8}, volume = {36}, number = {ECVP Abstract Supplement}, pages = {49}, abstract = {Humans learn and recognize objects through active exploration. Sixteen participants freely explored 3-D amoeboid objects in a virtual-reality environment during learning.
They handled a device whose spatial coordinates determined the object's position relative to the observer's viewpoint. These exploration patterns were also recorded for testing. In a subsequent old/new recognition test, participants either actively explored or passively viewed old (learned) and new objects in the same setup. Generally, active participants performed better than passive participants (in terms of sensitivity: d' = 1.08 vs 0.84, respectively). Despite this, those participants who passively viewed objects animated with their personal motion trajectories for learned objects maintained comparable performance to that of participants who actively explored the objects (d' = 1.13). In contrast, passive observers' performance decreased when these trajectories were temporally reversed (d' = 0.69) or when another observer's motion trajectories were used (d' = 0.70). While active exploration generally allowed better recognition of objects compared to passive viewing, our observers could rely on idiosyncratic exploration patterns, in which particular aspects of object structure were revealed over time, to achieve equivalent performance.}, web_url = {http://pec.sagepub.com/content/36/1_suppl.toc}, event_name = {30th European Conference on Visual Perception}, event_place = {Arezzo, Italy}, state = {published}, DOI = {10.1177/03010066070360S101}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 4860, title = {Personal Exploratory Experience of an Object Facilitates Its Subsequent Recognition}, year = {2007}, month = {7}, volume = {10}, pages = {103}, abstract = {Current research shows that human object recognition is sensitive to the learned order of familiar object views (e.g. [1]). This temporal order of views could be determined by how an observer manipulates an object during learning e.g., rigid rotations in depth. In fact, the freedom to manipulate objects during learning is also known to improve subsequent recognition from single static images [2]. In this study, sixteen participants learned novel 3D amoeboid objects by manipulating them in a virtual reality environment. This required the use of a marker tracking system (VICON) and a head-mounted display (z800 3DVisor eMagin). Our participants handled a tracked device whose spatial coordinates, relative to the observers’ viewpoint, determined the position and orientation of a virtual object that was presented via the head-mounted display. Hence, this device acted as a physical substitute for the virtual object and its coordinates were recorded as motion trajectories. In a subsequent old/new recognition test, participants either actively explored or passively viewed old (learned) and new objects in the same setup. Generally, “active” participants performed better than “passive” participants (in terms of sensitivity: d’=1.08 vs. 0.84 respectively). Nonetheless, passive viewing of learned objects that were animated with their learned motion trajectories resulted in comparably good performance (d’=1.13). The performance decrease was specific to passively viewing learned objects that either had their learned motion trajectories temporally reversed (d’=0.69) or followed another observer’s motion trajectories (d’=0.70).
Therefore, object recognition performance from passively viewing one’s past explorations of the learned object is comparable to actively exploring the learned object itself. These results provide further support for a dependence on temporal ordering of views during object recognition. Finally, these results could also be considered in the context of studies that highlight the human ability to discriminate one’s own actions from other people’s actions, e.g., hand gestures, handwriting, dart-throwing, full-body walking and ballet (for discussion and examples, see [3]). Here, our study also showed better recognition from viewing videos of self-generated actions. Nonetheless, this recognition benefit was specifically for the learned objects, which were not concretely embodied in the observer’s person. Moreover, animating new objects with the participants’ own actions did not increase their familiarity. We conclude by suggesting that our observers did not merely show a familiarity with their past actions but rather with the idiosyncratic visual experiences that their own actions created.}, file_url = {fileadmin/user_upload/files/publications/TWK-2007-Chuang.pdf}, event_name = {10th Tübinger Wahrnehmungskonferenz (TWK 2007)}, event_place = {Tübingen, Germany}, state = {published}, author = {Chuang LL{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ 4256, title = {An active approach to object recognition}, year = {2006}, month = {11}, volume = {7}, pages = {13}, abstract = {In visual object recognition, it is important to understand which object properties are important for learning. Typically, this is done by comparing recognition performance across experimental conditions that manipulate and isolate different aspects of object properties, e.g., distinctive features. However, such an approach requires object properties to be explicitly specified prior to testing and is, hence, limited by the experimenter’s imagination (or the lack thereof). Here, I will present a different approach to studying this problem. Rather than predefine the object properties of interest, participants are free to explore all aspects of a set of novel 3D objects during learning. Raw data are collected on observers’ patterns of exploration and analyses are subsequently applied to understand which object properties are valued by the observers during learning. In my presentation, I will describe the technical apparatus that supports this experimental approach. In addition, I will provide details on how raw data are collected and the methods of post-hoc analyses that can be applied to the data. There are several advantages to this approach in addition to those already mentioned. Firstly, this approach places control in the hands of the observer. Thus, stimulus presentation is determined by the observer’s goals rather than the experimenter’s preconceptions. This results in findings that are closer to ecological validity. Also, the raw data lend themselves to reanalysis when new methods of analyses are devised or when previously unconsidered object properties later prove to be relevant for object learning.
The purpose of this presentation is to generate an open discussion on the merits and disadvantages of this approach to studying visual object recognition.}, event_name = {7th Conference of the Junior Neuroscientists of Tübingen (NeNa 2006)}, event_place = {Oberjoch, Germany}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ ChuangVTB2006, title = {Familiar form and motion influence perceptual dominance}, journal = {Perception}, year = {2006}, month = {8}, volume = {35}, number = {ECVP Abstract Supplement}, pages = {33}, abstract = {Binocular rivalry can occur when two different stimuli are presented separately to each eye. Typically, the dominant percept alternates between the two presented stimuli. Prior studies have shown that perceptual dominance can be induced by low-level factors such as luminance as well as high-level factors such as object categories, suggesting that rivalry reflects competition at multiple levels of visual processing. Here, we investigated whether learned shape and motion of rigidly rotating objects can bias perceptual dominance during binocular rivalry. Observers first learned four novel objects that each rotated in a specific direction. These objects were randomly created by free-form deformation techniques. Following learning, we induced binocular rivalry between a learned object and a novel distractor. The learned object could rotate in its learned or reversed direction. For comparison purposes, we also included pairs of only novel objects. Initial results show that learned objects rotating in their learned direction are perceptually dominant more often than the paired distractors. Learned objects rotating in reverse do not appear to differ from novel objects in terms of perceived dominance. These findings suggest that binocular rivalry could provide a useful implicit measure of the roles played by shape and motion during object recognition.}, web_url = {http://pec.sagepub.com/content/35/1_suppl.toc}, event_name = {29th European Conference on Visual Perception}, event_place = {St. Petersburg}, state = {published}, DOI = {10.1177/03010066060350S101}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 4226, title = {Human perception and recognition of metric changes of part-based dynamic novel objects}, journal = {Perception}, year = {2006}, month = {8}, volume = {35}, number = {ECVP Abstract Supplement}, pages = {99}, abstract = {The role of object parts is a key issue in object recognition. Here we investigated whether observers encode qualitative (eg straight versus curved part) or metric information of parts (eg curvature magnitude), and whether the information that is encoded can be affected by motion. To address these issues, we constructed a novel set of objects composed of parts that can vary metrically along different dimensions (eg tapering and bending) to create qualitatively different parts. In a same/different matching task, we presented two objects rigidly rotating in the same or different direction, and had observers judge whether these objects were the same or different. We varied the pair of objects along an ‘identity‘ axis by morphing between two exemplars. 
A cumulative Gaussian function explained the effect of morph level, suggesting that observers encoded metric information. There was a slight shift of the psychometric function for same versus different motion. Overall, our results suggest that observers are sensitive to metric information, even for objects with salient part structure. We are currently investigating with fMRI how object parts and motion influence neuronal object processing.}, web_url = {http://pec.sagepub.com/content/35/1_suppl.toc}, event_name = {29th European Conference on Visual Perception}, event_place = {St. Petersburg, Russia}, state = {published}, DOI = {10.1177/03010066060350S101}, author = {Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Schultz J{johannes}{Department Human Perception, Cognition and Action}; Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ 4224, title = {Motion from the bottom up: From detection to cognition}, journal = {Perception}, year = {2006}, month = {8}, volume = {35}, number = {ECVP Abstract Supplement}, pages = {69}, abstract = {Motion signals projected onto the retina serve many different yet essential behavioral functions: from quickly detecting objects and segmenting them from background clutter, to effectively navigating through a dynamic environment and recognizing and interacting with objects populating that environment. Not surprisingly, computer scientists, psychologists, cognitive scientists, and neuroscientists alike have actively studied the perception and processing of visual motion. Until recently, the general approach has been to investigate mechanisms of motion perception relevant for specific purposes and typically focused at a specific level of processing, such as stimulus- or cognitively-driven mechanisms. Although this approach has greatly extended our knowledge and appreciation of visual motion processing, it is less clear how motion information relates across these different levels. The purpose of this symposium is to bridge the gap between these levels of visual motion processing and foster discussion between researchers across the various levels.}, web_url = {http://pec.sagepub.com/content/35/1_suppl.toc}, event_name = {29th European Conference on Visual Perception}, event_place = {St. Petersburg, Russia}, state = {published}, DOI = {10.1177/03010066060350S101}, author = {Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Pilz KS{kpilz}{Department Human Perception, Cognition and Action}; Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Poster{ 4007, title = {Role of familiar object motion in recognising objects across viewpoints}, journal = {Journal of Vision}, year = {2006}, month = {6}, volume = {6}, number = {6}, pages = {314}, abstract = {Unfamiliar viewpoints can hinder visual object recognition from 2D static images. Here, we ask whether the same is true when visual input is in the form of dynamic spatio-temporal sequences, such as would accompany object or observer motion. Previous research has shown that such motion can be characteristic for a particular object and hence provide additional cues to identity. In two experiments we demonstrate that learned object motion can facilitate recognition across unfamiliar viewpoints. In each experiment, 24 participants were trained to discriminate between two novel amoeboid-like objects seen from a fixed viewpoint.
These objects either deformed nonrigidly (Experiment 1) or rotated rigidly about a horizontal axis (Experiment 2). Both types of motion presented the observer with a coherent sequence of change that had a unique temporal order. After training, participants underwent a 2-interval-forced-choice task that tested their ability to discriminate the two learned objects from two novel objects. At test, objects were presented at 0°, 10°, 20° and 30° around the vertical axis relative to the learned viewpoint, and in the learned or reversed temporal order. The manipulation of temporal order has previously been used to study the contribution of motion to object recognition. In both experiments, accuracy decreased with increasing rotations away from the learned viewpoint and there was a constant benefit for learned object motion across all viewpoints tested (Experiment 1 = 4.9%; Experiment 2 = 5.3%). These results indicate that both rigid and non-rigid motion facilitated object recognition despite disturbances in 2D shape by viewpoint changes.}, web_url = {http://www.journalofvision.org/content/6/6/314.short?related-urls=yes&legid=jov;6/6/314}, event_name = {6th Annual Meeting of the Vision Sciences Society (VSS 2006)}, event_place = {Sarasota, FL, USA}, state = {published}, DOI = {10.1167/6.6.314}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ 3769, title = {Recognising face identity from natural and morphed smiles}, journal = {Quarterly Journal of Experimental Psychology}, year = {2006}, month = {5}, volume = {59}, number = {5}, pages = {801-808}, abstract = {It is easier to identify a degraded familiar face when it is shown moving (smiling, talking; nonrigid motion), than when it is displayed as a static image (Knight & Johnston, 1997; Lander, Christie, & Bruce, 1999). Here we explore the theoretical underpinnings of the moving face recognition advantage. In Experiment 1 we show that the identification of personally familiar faces when shown naturally smiling is significantly better than when the person is shown artificially smiling (morphed motion), as a single static neutral image or as a single static smiling image. In Experiment 2 we demonstrate that speeding up the motion significantly impairs the recognition of identity from natural smiles, but has little effect on morphed smiles. We conclude that the recognition advantage for face motion does not reflect a general benefit for motion, but suggests that, for familiar faces, information about their characteristic motion is stored in memory.}, web_url = {http://www.informaworld.com/smpp/ftinterface~content=a746009644~fulltext=713240930}, state = {published}, DOI = {10.1080/17470210600576136}, author = {Lander K; Chuang L{chuang}; Wickham L} } @Article{ 3770, title = {Recognising novel deforming objects}, journal = {Visual Cognition}, year = {2006}, month = {5}, volume = {14}, number = {1}, pages = {85-88}, abstract = {Current theories of visual object recognition tend to focus on static properties, particularly shape. Nonetheless, visual perception is a dynamic experience–as a result of active observers or moving objects. Here, we investigate whether dynamic information can influence visual object-learning. 
Three learning experiments were conducted that required participants to learn and subsequently recognize different non-rigid objects that deformed over time. Consistent with previous studies of rigid depth-rotation, our results indicate that human observers do represent object-motion. Furthermore, our data suggest that dynamic information could compensate for when static cues are less reliable, for example, as a result of viewpoint variation.}, web_url = {http://www.informaworld.com/smpp/ftinterface~content=a747834181~fulltext=713240930}, state = {published}, DOI = {10.1080/13506280600627756}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 4839, title = {Recognising Dynamic Objects Across Viewpoints}, year = {2006}, month = {3}, volume = {9}, pages = {118}, abstract = {Recognizing objects across viewpoints presents the visual system with an extremely challenging task. This would be particularly true if learned representations were solely determined by spatial properties. However, a number of recent studies have shown that observers are also highly sensitive to characteristic object motion. Could the availability of characteristic spatial-temporal patterns in the natural environment help explain the ability to generalise across viewpoints? Here, we examined how familiar object motion (both rigid and nonrigid) improves object recognition across different viewpoints. In both experiments, participants were first familiarised with two novel dynamic objects from a fixed viewpoint. These objects presented the observer with a coherent sequence of change that had a unique temporal order, resulting from either rotating a rigid object about the horizontal axis (Experiment 1) or through a characteristic deformation of a nonrigid object (Experiment 2). Subsequently, participants were tested for their ability to discriminate these learned objects from new distractors using a 2-interval-forced-choice task. During test, objects were presented at 0°, 10°, 20° and 30° around the vertical axis relative to the learned viewpoint, and in the learned or reversed temporal order. Motion reversal is a common manipulation used to disrupt spatiotemporal properties, without interfering with the object’s spatial characteristics. In both experiments, accuracy decreased with increasing variance from the learned viewpoint. Nonetheless, objects were consistently better recognised when presented in the learned motion sequence (mean accuracy: Expt 1 = 86%; Expt 2 = 81%) compared to the reverse motion condition (mean accuracy: Expt 1 = 81%; Expt 2 = 76%), across all viewpoints tested (Expt 1: F(1,23)=13.94, p<0.01; Expt 2: F(1,23)=8.78, p<0.01).
These results indicate that both rigid and non-rigid motion facilitated object recognition despite disturbances in 2D shape by viewpoint changes.}, file_url = {fileadmin/user_upload/files/publications/TWK-Chuang.pdf}, event_name = {9th Tübingen Perception Conference (TWK 2006)}, event_place = {Tübingen, Germany}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ 3771, title = {Recognising novel deforming objects}, year = {2005}, month = {11}, day = {9}, pages = {3}, abstract = {Current theories of visual object recognition tend to focus on static properties, particularly shape. Nonetheless, visual perception is a dynamic experience–as a result of active observers or moving objects. Here, we investigate whether dynamic information can influence visual object-learning. Three learning experiments were conducted that required participants to learn and subsequently recognize different non-rigid objects that deformed over time. Consistent with previous studies of rigid depth-rotation, our results indicate that human observers do represent object-motion. Furthermore, our data suggest that dynamic information could compensate for when static cues are less reliable, for example, as a result of viewpoint variation.}, web_url = {http://www.opam.net/archive/opam2005/OPAM05Abstracts.pdf}, event_name = {13th Annual Workshop on Object Perception, Attention, and Memory (OPAM 2005)}, event_place = {Toronto, Canada}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ LanderC2005, title = {Recognizing Face Identity from Natural and Morphed Smiles}, year = {2005}, month = {9}, day = {3}, pages = {72}, abstract = {People find it is easier to recognise the identity of a familiar face in non-optimum viewing conditions when it is moving (smiling, talking), compared to when shown as a static image. Here we explore the theoretical underpinnings of the moving face recognition advantage. Specifically, we compare the identification of personally familiar faces from natural smile sequences (dynamic morphing), artificial smile sequences, single static neutral images and single static smiling images. Results showed recognition was best when the face was viewed naturally smiling. A further experiment investigated the impact of motion tempo on the recognition of morphed familiar faces. Results indicate a significant interaction between the naturalness of the motion and the speed of the observed motion. 
We conclude that the recognition advantage for face motion does not reflect a general benefit for motion, but instead suggests that, for familiar faces, information about their characteristic motion is stored in memory.}, web_url = {http://escop.eu/site_media/uploads/14th.pdf}, event_name = {14th Bi-Annual Meeting of the European Society for Cognitive Psychology (ESCOP 2005)}, event_place = {Leiden, The Netherlands}, state = {published}, author = {Lander K; Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Conference{ Chuang2005, title = {Why use Line Drawings?}, year = {2005}, month = {9}, volume = {6}, pages = {8}, abstract = {Studies in the field of visual object recognition generally report observed human performance with 2D still images, e.g. photographs, line-drawings. One of the main reasons for doing so stems from the ready availability of such stimuli for experimentation (for example, see http://www.cog.brown.edu/~tarr/projects/databank.html). Human visual perception, however, is a dynamic process - as the result of either an active observer or a moving target, the visual experience is rarely static. Hence, it is important to question whether such findings realistically portray daily human behavior. Recent experiments using dynamic stimuli have shown that human performance can differ as a result of introducing natural motion information to the studied object; for example, there is a recognition benefit when faces are seen moving (e.g., O'Toole et al., 2002). Such evidence clearly suggests that object motion plays a non-trivial role in visual recognition. Nonetheless, there are challenges - both technical and experimental - that a researcher ought to consider when using dynamic stimuli. Here, I will discuss some of these issues as well as the steps that were adopted, in my research, to overcome them. In particular, I will describe how different types of dynamic stimuli could be generated for various experiments in novel object and face learning, as well as some of the software and hardware available for this undertaking. In addition, I will briefly discuss how such stimuli could be presented in psychophysical experiments, so as to control for possible artifacts, e.g., timing errors.}, web_url = {http://www.neuroschool-tuebingen-nena.de/index.php?id=284}, event_name = {6. Neurowissenschaftliche Nachwuchskonferenz Tübingen (NeNa '05)}, event_place = {Blaubeuren, Germany}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 3450, title = {Recognizing novel deforming objects}, year = {2005}, month = {8}, pages = {158}, abstract = {Human visual recognition can be improved with object motion (e.g., faces, Lander and Chuang, 2005; rigid objects, Vuong and Tarr, 2004). This improvement suggests that it is not merely shape information that characterizes an object. Rather, human observers may also represent how shape changes over time for recognition.}, file_url = {/fileadmin/user_upload/files/publications/pdf3450.pdf}, web_url = {http://portal.acm.org/citation.cfm?id=1080438}, editor = {Bülthoff, H.H., T.
Troscianko}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {2nd Symposium on Applied Perception in Graphics and Visualization (APGV 2005)}, event_place = {La Coruña, Spain}, state = {published}, ISBN = {1-59593-139-2}, DOI = {10.1145/1080402.1080438}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 4004, title = {Sequence selectivity of form transformation in visual object recognition}, journal = {Perception}, year = {2005}, month = {8}, volume = {34}, number = {ECVP Abstract Supplement}, pages = {130}, abstract = {Object motion, eg depth-rotation, provides visual information that might be useful for the reconstruction of an object's 3-D structure, hence increasing the recognition likelihood of any given moving object. Our aim is to demonstrate that object motion can, in itself, serve as an independent cue to object identity without particular recourse to form-retrieval processes. In this study, we used novel amoeboid objects that transformed nonrigidly over time. Two experiments are reported on the learnt recognition of such stimuli. During an initial study phase, participants learnt to identify these objects. At test, participants were either presented with an old/new recognition task (experiment 1) or with a two-alternative forced-choice task (experiment 2). Here, learnt stimuli were presented in either the studied sequence of shape transformations, or the reverse order. Although the shapes shown were the same in both instances, the overall findings indicate that participants performed significantly better in recognising the learnt objects when the same shapes were presented in the learnt sequence, than when they were presented in reverse sequence. If object motion facilitates recognition of the stimulus solely by contributing to the recovery of its form, the sequence of non-rigid transformation would not be relevant to its representation. Nonetheless, these findings suggest that human observers do not merely remember a visual object as a collection of different shapes. Instead, observers are also sensitive to how these shapes transform over time.}, web_url = {http://pec.sagepub.com/content/34/1_suppl.toc}, event_name = {28th European Conference on Visual Perception}, event_place = {A Coruña, Spain}, state = {published}, DOI = {10.1177/03010066050340S101}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}} } @Conference{ 3772, title = {Motion matters: learning dynamic objects}, year = {2005}, month = {8}, abstract = {Previous research has typically focused on static properties of objects. Recently there has been a growing interest in the role that dynamic information might play in the perception and representation of objects. In this talk we approach this issue by describing how the visual system utilises dynamic information in learning two different classes of visual objects: i) novel deforming stimuli, ii) faces. Object-learning experiments with novel objects show that human observers are sensitive to the motion characteristics. 
In addition, preliminary results also suggest that learned motion characteristics can reduce the detrimental effects of changing the studied viewpoint. Using faces, we explored how encoding of identity is affected by two different types of facial movements: non-rigid facial motion, and looming facial motion. Using a delayed visual search paradigm we could show that faces learned in motion were found more quickly and more accurately than faces learned from static snapshots. In summary, results from our lab suggest that the visual system uses dynamic information to encode and subsequently recognize new object/face identities.}, event_name = {Sensational Seminar Series, School of Psychology, Cardiff University}, event_place = {Cardiff, UK}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}} } @Article{ 2929, title = {Why are moving faces easier to recognize?}, journal = {Visual Cognition}, year = {2005}, month = {4}, volume = {12}, number = {3}, pages = {429-442}, abstract = {Previous work has suggested that seeing a famous face move aids the recognition of identity, especially when viewing conditions are degraded (Knight & Johnston, 1997; Lander, Christie, & Bruce, 1999). Experiment 1 investigated whether the beneficial effects of motion are related to a particular type of facial motion (expressing, talking, or rigid motion). Results showed a significant beneficial effect of both expressive and talking movements, but no advantage for rigid motion, compared with a single static image. Experiment 2 investigated whether the advantage for motion is uniform across identity. Participants rated moving famous faces for distinctiveness of motion. The famous faces (moving and static freeze frame) were then used as stimuli in a recognition task. The advantage for face motion was significant only when the motion displayed was distinctive. Results suggest that a reason why moving faces are easier to recognize is because some familiar faces have characteristic motion patterns, which act as an additional cue to identity.}, file_url = {/fileadmin/user_upload/files/publications/pdf2929.pdf}, web_url = {http://www.informaworld.com/smpp/6084610-9322645/ftinterface~content=a713734696~fulltext=713240930~frm=content}, state = {published}, DOI = {10.1080/13506280444000382}, author = {Lander K; Chuang L{chuang}} } @Conference{ 3773, title = {Recognising Flubber: Role of motion in visual object recognition}, year = {2005}, month = {3}, event_name = {Brainstorming Colloquium, Department of Psychology, University of Manchester}, event_place = {Manchester, UK}, state = {published}, author = {Chuang L{chuang}{Department Human Perception, Cognition and Action}; Vuong QC{qvuong}{Department Human Perception, Cognition and Action}; Thornton I{ian}{Department Human Perception, Cognition and Action}} } @Article{ FautzPSH2004, title = {TRIM: TR independent multislice imaging}, journal = {Magnetic Resonance in Medicine}, year = {2004}, month = {6}, volume = {51}, number = {6}, pages = {1239-1246}, abstract = {This article introduces a novel concept to overcome the dependence of image contrast on spatial positioning parameters such as the number of slices and slice separation in multislice measurements: TR-independent multislice (TRIM) acquisition allows the number of slices in a single measurement to remain independent of the repetition time TR. 
Ramped TRIM (rTRIM) allows the distance between the sections excited in each repetition to remain independent of the distance between the reconstructed slices. Even images from overlapping slices can be acquired without crosstalk between the images of adjacent slices due to spatially overlapping excitation profiles. This concept is based on a special reordering scheme: within a single TR, acquisition steps are only taken from a fraction of all slices. This necessitates attribution of different phase-encoding steps to different slices within each repetition cycle. The reordering scheme can be derived by the use of a design matrix. The imaging properties of the technique are discussed theoretically and illustrated by a point spread function analysis based on simulations and phantom measurements. Potential sources of artifacts are identified and methods for their prevention are developed. Optimized implementations with different T1-weighted sequences such as spin echo (SE), turbo spin echo (TSE), and spoiled gradient echo acquisitions are shown on normal volunteers with imaging parameters used in routine diagnosis.}, web_url = {http://onlinelibrary.wiley.com/doi/10.1002/mrm.20093/pdf}, state = {published}, DOI = {10.1002/mrm.20093}, author = {Fautz H-P; Paul D; Scheffler K{scheffler}; Hennig J} } @Conference{ 2932, title = {The importance of motion for learning and recognising faces.}, year = {2004}, month = {1}, event_name = {76th Vision Seminar}, event_place = {ATR Laboratories, Japan}, state = {published}, author = {Lander K; Chuang L{chuang}; Bruce V} } @Inproceedings{ PaulSH2003, title = {Verbessertes Off-Resonanz-Verhalten bei TrueFISP-Sequenzen durch variable Flipwinkel (TIDE)}, year = {2003}, month = {9}, pages = {31-32}, abstract = {The signal of steady-state free precession (SSFP) sequences (e.g., TrueFISP, FIESTA, balanced FFE) shows increasing fluctuations during the transient phase as the off-resonance frequency increases. To avoid imaging artifacts from off-resonance spins, SSFP sequences therefore require a considerably longer preparation phase. A marked improvement in signal behavior is achieved by a linear transition of the flip angle from an initially high value to the steady-state flip angle α0 (TIDE = transition into driven equilibrium). In addition, TIDE allows an improvement in SNR and a modification of contrast in SSFP sequences.}, web_url = {http://www.ismrm.de/images/documents/DS-ISMRM_2003_Abstractband.pdf}, event_name = {6.
Jahrestagung der Deutschen Sektion der ISMRM (DS ISMRM 2003)}, event_place = {Heidelberg, Germany}, state = {published}, author = {Paul D; Scheffler K{scheffler}; Hennig J} } @Conference{ 2935, title = {The role of motion in learning new faces.}, year = {2003}, month = {9}, event_name = {European Conference on Cognitive Psychology}, event_place = {Granada, Spain}, state = {published}, author = {Lander K; Chuang L{chuang}; Bruce V} } @Conference{ 2934, title = {What aspects of facial motion are beneficial for recognition?}, year = {2003}, month = {7}, event_name = {12th International Conference on Perception and Action}, event_place = {Perth, Australia}, state = {published}, author = {Lander K; Chuang L{chuang}} } @Article{ WintererSPHSAL2000, title = {Optimization of Contrast-Enhanced MR Angiography of the Hands with a Timing Bolus and Elliptically Reordered 3D Pulse Sequence}, journal = {Journal of Computer Assisted Tomography}, year = {2000}, month = {11}, volume = {24}, number = {6}, pages = {903-908}, abstract = {Our objective was to optimize bolus administration and sequence setting in gadolinium-enhanced magnetic resonance (MR) angiography of the hands. An elliptically reordered three-dimensional (3D) spoiled gradient-echo sequence with non-slab-selective radio frequency excitation was optimized according to the measurements of arterial and venous time-signal curves in 21 patients. Great variations in bolus arrival time and arterio-venous transit time could be observed. In most patients, high-quality arterial depiction could be obtained with minor venous contamination. Contrast-to-noise, spatial resolution, and selective arterial filling are still a challenge for 3D MR angiography of the hand but can be optimized using Gadolinium-BOPTA and a dedicated pulse sequence setting with exact bolus timing.}, web_url = {http://journals.lww.com/jcat/pages/articleviewer.aspx?year=2000&issue=11000&article=00017&type=abstract}, state = {published}, author = {Winterer JT; Scheffler K{scheffler}; Paul G; Hauer M; Sch\"afer O; Altehoefer C; Laubenberger J} }