@Proceedings{ RosenbergKWMBSI2017, title = {2017 IEEE Virtual Reality (VR 2017)}, year = {2017}, month = {3}, pages = {476}, web_url = {http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=7889401}, publisher = {IEEE}, address = {Piscataway, NJ, USA}, event_name = {2017 IEEE Virtual Reality}, event_place = {Los Angeles, CA, USA}, state = {published}, ISBN = {978-1-5090-6647-6}, author = {Rosenberg ES; Krum DM; Wartell Z; Mohler B{mohler}; Babu SV; Steinicke F; Interrante V} } @Proceedings{ ImuraFM2015, title = {International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments: ICAT-EGVE 2015}, year = {2015}, month = {10}, pages = {190}, web_url = {http://dblp2.uni-trier.de/db/conf/egve/icat-egve2015}, publisher = {Eurographics Association}, address = {Aire-la-Ville, Switzerland}, event_name = {25th International Conference on Artificial Reality and Telexistence and the 20th Eurographics Symposium on Virtual Environments (ICAT-EGVE 2015)}, event_place = {Kyoto, Japan}, state = {published}, ISBN = {978-3-905674-84-2}, author = {Imura M; Figueroa P; Mohler BJ{mohler}} } @Proceedings{ BaileyKMS2014, title = {ACM Symposium on Applied Perception}, year = {2014}, month = {8}, pages = {136}, web_url = {http://dl.acm.org/citation.cfm?id=2628257}, publisher = {ACM}, address = {New York, NY, USA}, event_name = {ACM Symposium on Applied Perception (SAP '14)}, event_place = {Vancouver, Canada}, state = {published}, ISBN = {978-1-4503-3009-1}, author = {Bailey R; Kuhl S; Mohler B{mohler}{Department Human Perception, Cognition and Action}; Singh K} } @Proceedings{ MohlerRSS2013, title = {5th Joint Virtual Reality Conference}, year = {2013}, month = {12}, pages = {94}, web_url = {http://dl.acm.org/citation.cfm?id=2600262}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {5th Joint Virtual Reality Conference (JVRC '13)}, event_place = {Paris, France}, state = {published}, ISBN = {978-3-905674-47-7}, author = {Mohler B{mohler}{Department Human Perception, Cognition and Action}; Raffin B; Saito H; Staadt O} } @Proceedings{ 5234, title = {Virtual Environments 2008}, journal = {Proceedings of the 14th Eurographics Symposium on Virtual Environments}, year = {2008}, month = {5}, pages = {126}, web_url = {http://www.kyb.tuebingen.mpg.de/EGVE/}, publisher = {Eurographics Association}, address = {Aire-la-Ville, Switzerland}, event_name = {14th Eurographics Symposium on Virtual Environments (EGVE 2008)}, event_place = {Eindhoven, Netherlands}, state = {published}, ISBN = {978-3-905674-06-4}, author = {van Liere R; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Article{ MolbertKTMBMKZG2017, title = {Depictive and metric body size estimation in anorexia nervosa and bulimia nervosa: A systematic review and meta-analysis}, journal = {Clinical Psychology Review}, year = {2017}, month = {11}, volume = {57}, pages = {21–31}, abstract = {A distorted representation of one's own body is a diagnostic criterion and core psychopathology of both anorexia nervosa (AN) and bulimia nervosa (BN). Despite recent technical advances in research, it is still unknown whether this body image disturbance is characterized by body dissatisfaction and a low ideal weight and/or includes a distorted perception or processing of body size. 
In this article, we provide an update and meta-analysis of 42 articles summarizing measures and results for body size estimation (BSE) from 926 individuals with AN, 536 individuals with BN and 1920 controls. We replicate findings that individuals with AN and BN overestimate their body size as compared to controls (ES = 0.63). Our meta-regression shows that metric methods (BSE by direct or indirect spatial measures) yield larger effect sizes than depictive methods (BSE by evaluating distorted pictures), and that effect sizes are larger for patients with BN than for patients with AN. To interpret these results, we suggest a revised theoretical framework for BSE that accounts for differences between depictive and metric BSE methods regarding the underlying body representations (conceptual vs. perceptual, implicit vs. explicit). We also discuss clinical implications and argue for the importance of multimethod approaches to investigate body image disturbance.}, web_url = {http://www.sciencedirect.com/science/article/pii/S0272735816304822}, state = {published}, DOI = {10.1016/j.cpr.2017.08.005}, author = {M\"olbert SC{smoelbert}; Klein L; Thaler A{athaler}; Mohler BJ{mohler}; Brozzo C{cbrozzo}; Martus P; Karnath H-O; Zipfel S; Giel KE} } @Article{ MolbertTSBKZMG2017_2, title = {Investigating Body Image Disturbance in Anorexia Nervosa Using Novel Biometric Figure Rating Scales: A Pilot Study}, journal = {European Eating Disorders Review}, year = {2017}, month = {11}, volume = {25}, number = {6}, pages = {607–612}, abstract = {This study uses novel biometric figure rating scales (FRS) spanning body mass index (BMI) 13.8 to 32.2 kg/m2 and BMI 18 to 42 kg/m2. The aims of the study were (i) to compare FRS body weight dissatisfaction and perceptual distortion of women with anorexia nervosa (AN) to a community sample; (ii) to examine how FRS parameters are associated with questionnaire body dissatisfaction, eating disorder symptoms and appearance comparison habits; and (iii) to test whether the weight spectrum of the FRS matters. Women with AN (n = 24) and a community sample of women (n = 104) selected their current and ideal body on the FRS and completed additional questionnaires. Women with AN accurately picked the body that aligned best with their actual weight in both FRS. Controls underestimated their BMI in the FRS 14–32 and were accurate in the FRS 18–42. In both FRS, women with AN desired a body close to their actual BMI and controls desired a thinner body. Our observations suggest that body image disturbance in AN is unlikely to be characterized by a visual perceptual disturbance, but rather by an idealization of underweight in conjunction with high body dissatisfaction. The weight spectrum of FRS can influence the accuracy of BMI estimation.}, web_url = {http://onlinelibrary.wiley.com/doi/10.1002/erv.2559/epdf}, state = {published}, DOI = {10.1002/erv.2559}, author = {M\"olbert SC{smoelbert}; Thaler A{athaler}; Streuber S{stst}; Black MJ{black}; Karnath H-O; Zipfel S; Mohler B{mohler}; Giel KE} } @Article{ MolbertTMSRBZKG2017, title = {Assessing body image in anorexia nervosa using biometric self-avatars in virtual reality: Attitudinal components rather than visual body size estimation are distorted}, journal = {Psychological Medicine}, year = {2017}, month = {7}, volume = {Epub ahead}, abstract = {Body image disturbance (BID) is a core symptom of anorexia nervosa (AN), but as yet distinctive features of BID are unknown. 
The present study aimed at disentangling perceptual and attitudinal components of BID in AN. We investigated n = 24 women with AN and n = 24 controls. Based on a three-dimensional (3D) body scan, we created realistic virtual 3D bodies (avatars) for each participant that were varied through a range of ±20% of the participants’ weights. Avatars were presented in a virtual reality mirror scenario. Using different psychophysical tasks, participants identified and adjusted their actual and their desired body weight. To test for general perceptual biases in estimating body weight, a second experiment investigated perception of weight and shape matched avatars with another identity. Women with AN and controls underestimated their weight, with a trend that women with AN underestimated more. The average desired body of controls had normal weight while the average desired weight of women with AN corresponded to extreme AN (DSM-5). Correlation analyses revealed that desired body weight, but not accuracy of weight estimation, was associated with eating disorder symptoms. In the second experiment, both groups estimated accurately while the most attractive body was similar to Experiment 1. Our results contradict the widespread assumption that patients with AN overestimate their body weight due to visual distortions. Rather, they illustrate that BID might be driven by distorted attitudes with regard to the desired body. Clinical interventions should aim at helping patients with AN to change their desired weight.}, web_url = {https://www.cambridge.org/core/services/aop-cambridge-core/content/view/A111D545D17E2F260D62434C254F3951/S0033291717002008a.pdf/assessing_body_image_in_anorexia_nervosa_using_biometric_selfavatars_in_virtual_reality_attitudinal_components_rather_than_visu}, state = {published}, DOI = {10.1017/S0033291717002008}, author = {M\"olbert SC{smoelbert}; Thaler A{athaler}; Mohler BJ{mohler}; Streuber S{stst}; Romero J{jromero}; Black MJ{black}; Zipfel S; Karnath H-O; Giel KE} } @Article{ BulthoffMT2017, title = {Face recognition of full-bodied avatars by active observers in a virtual environment}, journal = {Vision Research}, year = {2017}, month = {7}, state = {submitted}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}} } @Article{ SaultonMBD2016, title = {Egocentric biases in comparative volume judgments of rooms}, journal = {Journal of Vision}, year = {2016}, month = {4}, volume = {16}, number = {6:2}, pages = {1-16}, abstract = {The elongation of a figure or object can induce a perceptual bias regarding its area or volume estimation. This bias is notable in Piagetian experiments in which participants tend to consider elongated cylinders to contain more liquid than shorter cylinders of equal volume. We investigated whether similar perceptual biases could be found in volume judgments of surrounding indoor spaces and whether those judgments were viewpoint dependent. Participants compared a variety of computer-generated rectangular rooms with a square room in a psychophysical task. We found that the elongation bias in figures or objects was also present in volume comparison judgments of indoor spaces. Further, the direction of the bias (larger or smaller) depended on the observer's viewpoint. 
Similar results were obtained from a monoscopic computer display (Experiment 1) and stereoscopic head-mounted display with head tracking (Experiment 2). We used generalized linear mixed-effect models to model participants' volume judgments using a function of room depth and width. A good fit to the data was found when applying weight on the depth relative to the width, suggesting that participants' judgments were biased by egocentric properties of the space. We discuss how biases in comparative volume judgments of rooms might reflect the use of simplified strategies, such as anchoring on one salient dimension of the space.}, web_url = {http://jov.arvojournals.org/article.aspx?articleid=2512824&resultClick=1}, state = {published}, DOI = {10.1167/16.6.2}, author = {Saulton A{asaulton}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Dodds TJ{dodds}{Department Human Perception, Cognition and Action}} } @Article{ GeussSCTM2015, title = {Effect of Display Technology on Perceived Scale of Space}, journal = {Human Factors}, year = {2015}, month = {11}, volume = {57}, number = {7}, pages = {1235-1247}, abstract = {Objective: Our goal was to evaluate the degree to which display technologies influence the perception of size in an image. Background: Research suggests that factors such as whether an image is displayed stereoscopically, whether a user’s viewpoint is tracked, and the field of view of a given display can affect users’ perception of scale in the displayed image. Method: Participants directly estimated the size of a gap by matching the distance between their hands to the gap width and judged their ability to pass unimpeded through the gap in one of five common implementations of three display technologies (two head-mounted displays [HMD] and a back-projection screen). Results: Both measures of gap width were similar for the two HMD conditions and the back projection with stereo and tracking. For the displays without tracking, stereo and monocular conditions differed from each other, with monocular viewing showing underestimation of size. Conclusions: Display technologies that are capable of stereoscopic display and tracking of the user’s viewpoint are beneficial as perceived size does not differ from real-world estimates. Evaluations of different display technologies are necessary as display conditions vary and the availability of different display technologies continues to grow. Applications: The findings are important to those using display technologies for research, commercial, and training purposes when it is important for the displayed image to be perceived at an intended scale.}, web_url = {http://hfs.sagepub.com/content/57/7/1235.full.pdf+html}, state = {published}, DOI = {10.1177/0018720815590300}, author = {Geuss MN{mgeuss}{Department Human Perception, Cognition and Action}; Stefanucci JK{jstefanucci}{Department Human Perception, Cognition and Action}; Creem-Regehr SH; Thompson WB; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Article{ DobrickiM2015, title = {Self-Identification With Another’s Body Alters Self-Other Face Distinction}, journal = {Perception}, year = {2015}, month = {7}, volume = {44}, number = {7}, pages = {814-820}, abstract = {When looking into a mirror healthy humans usually clearly perceive their own face. 
Such an unambiguous face self-perception indicates that an individual has a discrete facial self-representation and thereby the involvement of a self-other face distinction mechanism. We have stroked the trunk of healthy individuals while they watched the trunk of a virtual human that was facing them being synchronously stroked. Subjects sensed self-identification with the virtual body, which was accompanied by a decrease of their self-other face distinction. This suggests that face self-perception involves the self-other face distinction and that this mechanism underlies the formation of a discrete representation of one’s face. Moreover, the self-identification with another’s body that we find suggests that the perception of one’s full body affects the self-other face distinction. Hence, changes in self-other face distinction can indicate alterations of body self-perception, and thereby serve to elucidate the relationship of face and body self-perception.}, web_url = {http://pec.sagepub.com/content/44/7/814.full.pdf+html}, state = {published}, DOI = {10.1177/0301006615594697}, author = {Dobricki M{mdobricki}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Article{ LeyrerLBM2015_2, title = {The Importance of Postural Cues for Determining Eye Height in Immersive Virtual Reality}, journal = {PLoS ONE}, year = {2015}, month = {5}, volume = {10}, number = {5}, pages = {1-23}, abstract = {In human perception, the ability to determine eye height is essential, because eye height is used to scale heights of objects, velocities, affordances and distances, all of which allow for successful environmental interaction. It is well understood that eye height is fundamental to determine many of these percepts. Yet, how eye height itself is provided is still largely unknown. While the information potentially specifying eye height in the real world is naturally coincident in an environment with a regular ground surface, these sources of information can be easily divergent in similar and common virtual reality scenarios. Thus, we conducted virtual reality experiments where we manipulated the virtual eye height in a distance perception task to investigate how eye height might be determined in such a scenario. We found that humans rely more on their postural cues for determining their eye height if there is a conflict between visual and postural information and little opportunity for perceptual-motor calibration is provided. This is demonstrated by the predictable variations in their distance estimates. 
Our results suggest that the eye height in such circumstances is informed by postural cues when estimating egocentric distances in virtual reality and, consequently, does not depend on an internalized value for eye height.}, web_url = {http://www.plosone.org/article/fetchObject.action?uri=info:doi/10.1371/journal.pone.0127000&representation=PDF}, state = {published}, DOI = {10.1371/journal.pone.0127000}, EPUB = {e0127000}, author = {Leyrer M{leyrer}{Department Human Perception, Cognition and Action}; Linkenauger SA{sally}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Article{ LinkenaugerBM2014, title = {Virtual arm's reach influences perceived distances but only after experience reaching}, journal = {Neuropsychologia}, year = {2015}, month = {4}, volume = {70}, pages = {393–401}, abstract = {Considerable empirical evidence has shown influences of the action capabilities of the body on the perception of sizes and distances. Generally, as one’s action capabilities increase, the perception of the relevant distance (over which the action is to be performed) decreases and vice versa. As a consequence, it has been proposed that the body’s action capabilities act as a perceptual ruler, which is used to measure perceived sizes and distances. In this set of studies, we investigated this hypothesis by assessing the influence of arm’s reach on the perception of distance. By providing participants with a self-representing avatar seen in a first-person perspective in virtual reality, we were able to introduce novel and completely unfamiliar alterations in the virtual arm’s reach to evaluate their impact on perceived distance. Using both action-based and visual matching measures, we found that virtual arm’s reach influenced perceived distance in virtual environments. Due to the participants’ inexperience with the reach alterations, we also were able to assess the amount of experience with the new arm’s reach required to influence perceived distance. We found that minimal experience reaching with the virtual arm can influence perceived distance. However, some reaching experience is required. Merely having a long or short virtual arm, even one that is synchronized to one’s movements, is not enough to influence distance perception if one has no experience reaching.}, web_url = {http://www.sciencedirect.com/science/article/pii/S0028393214003947}, state = {published}, DOI = {10.1016/j.neuropsychologia.2014.10.034}, author = {Linkenauger SA{sally}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Article{ LeyrerLBM2015, title = {Eye Height Manipulations: A Possible Solution to Reduce Underestimation of Egocentric Distances in Head-Mounted Displays}, journal = {ACM Transactions on Applied Perception}, year = {2015}, month = {2}, volume = {12}, number = {1:1}, pages = {1-23}, abstract = {Virtual reality technology can be considered a multipurpose tool for diverse applications in various domains, for example, training, prototyping, design, entertainment, and research investigating human perception. However, for many of these applications, it is necessary that the designed and computer-generated virtual environments are perceived as a replica of the real world. 
Many research studies have shown that this is not necessarily the case. Specifically, egocentric distances are underestimated compared to real-world estimates regardless of whether the virtual environment is displayed in a head-mounted display or on an immersive large-screen display. While the main reason for this observed distance underestimation is still unknown, we investigate a potential approach to reduce or even eliminate this distance underestimation. Building on the angle of declination below the horizon relationship for perceiving egocentric distances, we describe how eye height manipulations in virtual reality should affect perceived distances. In addition, we describe how this relationship could be exploited to reduce distance underestimation for individual users. In a first experiment, we investigate the influence of a manipulated eye height on an action-based measure of egocentric distance perception. We found that eye height manipulations have similar predictable effects on an action-based measure of egocentric distance as we previously observed for a cognitive measure. This might make this approach more useful than other proposed solutions across different scenarios in various domains, for example, for collaborative tasks. In three additional experiments, we investigate the influence of an individualized manipulation of eye height to reduce distance underestimation in a sparse-cue and a rich-cue environment. In these experiments, we demonstrate that a simple eye height manipulation can be used to selectively alter perceived distances on an individual basis, which could be helpful to enable every user to have an experience close to what was intended by the content designer.}, web_url = {http://dl.acm.org/citation.cfm?id=2699254}, state = {published}, DOI = {10.1145/2699254}, author = {Leyrer M{leyrer}{Department Human Perception, Cognition and Action}; Linkenauger SA{sally}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Article{ LinkenaugerPMCBSGW2014, title = {The Perceptual Homunculus: The Perception of the Relative Proportions of the Human Body}, journal = {Journal of Experimental Psychology: General}, year = {2015}, month = {2}, volume = {144}, number = {1}, pages = {103-113}, abstract = {Given that observing one’s body is ubiquitous in experience, it is natural to assume that people accurately perceive the relative sizes of their body parts. This assumption is mistaken. In a series of studies, we show that there are dramatic systematic distortions in the perception of bodily proportions, as assessed by visual estimation tasks, where participants were asked to compare the lengths of two body parts. These distortions are not evident when participants estimate the extent of a body part relative to a noncorporeal object or when asked to estimate noncorporeal objects that are the same length as their body parts. Our results reveal a radical asymmetry in the perception of corporeal and noncorporeal relative size estimates. 
Our findings also suggest that people visually perceive the relative size of their body parts as a function of each part’s relative tactile sensitivity and physical size.}, web_url = {http://psycnet.apa.org/journals/xge/144/1/103.pdf}, state = {published}, DOI = {10.1037/xge0000028}, author = {Linkenauger SA{sally}{Department Human Perception, Cognition and Action}; Wong Hy; Geuss M{mgeuss}{Department Human Perception, Cognition and Action}; Stefanucci JK{jstefanucci}{Department Human Perception, Cognition and Action}; McCulloch KC; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Proffitt DR} } @Article{ VolkovadBM2014, title = {The MPI Emotional Body Expressions Database for Narrative Scenarios}, journal = {PLoS ONE}, year = {2014}, month = {12}, volume = {9}, number = {12}, pages = {1-28}, abstract = {Emotion expression in human-human interaction takes place via various types of information, including body motion. Research on the perceptual-cognitive mechanisms underlying the processing of natural emotional body language can benefit greatly from datasets of natural emotional body expressions that facilitate stimulus manipulation and analysis. The existing databases have so far focused on few emotion categories which display predominantly prototypical, exaggerated emotion expressions. Moreover, many of these databases consist of video recordings which limit the ability to manipulate and analyse the physical properties of these stimuli. We present a new database consisting of a large set (over 1400) of natural emotional body expressions typical of monologues. To achieve close-to-natural emotional body expressions, amateur actors were narrating coherent stories while their body movements were recorded with motion capture technology. The resulting 3-dimensional motion data recorded at a high frame rate (120 frames per second) provides fine-grained information about body movements and allows the manipulation of movement on a body joint basis. For each expression it gives the positions and orientations in space of 23 body joints for every frame. We report the results of physical motion properties analysis and of an emotion categorisation study. The reactions of observers from the emotion categorisation study are included in the database. Moreover, we recorded the intended emotion expression for each motion sequence from the actor to allow for investigations regarding the link between intended and perceived emotions. The motion sequences along with the accompanying information are made available in a searchable MPI Emotional Body Expression Database. We hope that this database will enable researchers to study expression and perception of naturally occurring emotional body expressions in greater depth. 
}, web_url = {http://www.plosone.org/article/fetchObject.action?uri=info%3Adoi%2F10.1371%2Fjournal.pone.0113647&representation=PDF}, state = {published}, DOI = {10.1371/journal.pone.0113647}, EPUB = {e113647}, author = {Volkova E{evolk}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}} } @Article{ LinkenaugerGSLRPBM2014, title = {Evidence for Hand-Size Constancy: The Dominant Hand as a Natural Perceptual Metric}, journal = {Psychological Science}, year = {2014}, month = {11}, volume = {25}, number = {11}, pages = {2086-2094}, abstract = {The hand is a reliable and ecologically useful perceptual ruler that can be used to scale the sizes of close, manipulatable objects in the world in a manner similar to the way in which eye height is used to scale the heights of objects on the ground plane. Certain objects are perceived proportionally to the size of the hand, and as a result, changes in the relationship between the sizes of objects in the world and the size of the hand are attributed to changes in object size rather than hand size. To illustrate this notion, we provide evidence from several experiments showing that people perceive their dominant hand as less magnified than other body parts or objects when these items are subjected to the same degree of magnification. These findings suggest that the hand is perceived as having a more constant size and, consequently, can serve as a reliable metric with which to measure objects of commensurate size.}, web_url = {http://pss.sagepub.com/content/25/11/2086.full.pdf+html}, state = {published}, DOI = {10.1177/0956797614548875}, author = {Linkenauger SA{sally}{Department Human Perception, Cognition and Action}; Geuss MN{mgeuss}{Department Human Perception, Cognition and Action}; Stefanucci JK{jstefanucci}{Department Human Perception, Cognition and Action}; Leyrer M{leyrer}{Department Human Perception, Cognition and Action}; Richardson BH; Proffitt DR; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Article{ MuellingBMSP2014, title = {Learning strategies in table tennis using inverse reinforcement learning}, journal = {Biological Cybernetics}, year = {2014}, month = {10}, volume = {108}, number = {5}, pages = {603-619}, abstract = {Learning a complex task such as table tennis is a challenging problem for both robots and humans. Even after acquiring the necessary motor skills, a strategy is needed to choose where and how to return the ball to the opponent’s court in order to win the game. The data-driven identification of basic strategies in interactive tasks, such as table tennis, is a largely unexplored problem. In this paper, we suggest a computational model for representing and inferring strategies, based on a Markov decision problem, where the reward function models the goal of the task as well as the strategic information. We show how this reward function can be discovered from demonstrations of table tennis matches using model-free inverse reinforcement learning. The resulting framework allows us to identify basic elements on which the selection of striking movements is based. We tested our approach on data collected from players with different playing styles and under different playing conditions. 
The estimated reward function was able to capture expert-specific strategic information that sufficed to distinguish the expert among players with different skill levels as well as different playing styles.}, web_url = {http://link.springer.com/content/pdf/10.1007%2Fs00422-014-0599-1.pdf}, state = {published}, DOI = {10.1007/s00422-014-0599-1}, author = {Muelling K{muelling}{Department Empirical Inference}; Boularias A{boularias}{Department Empirical Inference}; Mohler B{mohler}; Sch\"olkopf B{bs}{Department Empirical Inference}; Peters J{jrpeters}{Department Empirical Inference}} } @Article{ PiryankovaWLSLBM2014, title = {Owning an Overweight or Underweight Body: Distinguishing the Physical, Experienced and Virtual Body}, journal = {PLoS ONE}, year = {2014}, month = {8}, volume = {9}, number = {8}, pages = {1-13}, abstract = {Our bodies are the most intimately familiar objects we encounter in our perceptual environment. Virtual reality provides a unique method to allow us to experience having a very different body from our own, thereby providing a valuable method to explore the plasticity of body representation. In this paper, we show that women can experience ownership over a whole virtual body that is considerably smaller or larger than their physical body. In order to gain a better understanding of the mechanisms underlying body ownership, we use an embodiment questionnaire, and introduce two new behavioral response measures: an affordance estimation task (indirect measure of body size) and a body size estimation task (direct measure of body size). Interestingly, after viewing the virtual body from first person perspective, both the affordance and the body size estimation tasks indicate a change in the perception of the size of the participant's experienced body. The change is biased by the size of the virtual body (overweight or underweight). Another novel aspect of our study is that we distinguish between the physical, experienced and virtual bodies, by asking participants to provide affordance and body size estimations for each of the three bodies separately. This methodological point is important for virtual reality experiments investigating body ownership of a virtual body, because it offers a better understanding of which cues (e.g. visual, proprioceptive, memory, or a combination thereof) influence body perception, and whether the impact of these cues can vary between different setups.}, web_url = {http://www.plosone.org/article/fetchObject.action?uri=info%3Adoi%2F10.1371%2Fjournal.pone.0103428&representation=PDF}, state = {published}, DOI = {10.1371/journal.pone.0103428}, EPUB = {e103428}, author = {Piryankova IV{ivelina}{Department Human Perception, Cognition and Action}; Wong HY; Linkenauger SA{sally}{Department Human Perception, Cognition and Action}; Stinson C{cstinson}{Department Human Perception, Cognition and Action}; Longo MR; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Article{ VolkovaMDTB2014_2, title = {Emotion categorization of body expressions in narrative scenarios}, journal = {Frontiers in Psychology}, year = {2014}, month = {6}, volume = {5}, number = {623}, pages = {1-11}, abstract = {Humans can recognize emotions expressed through body motion with high accuracy even when the stimuli are impoverished. However, most of the research on body motion has relied on exaggerated displays of emotions. 
In this paper we present two experiments where we investigated whether emotional body expressions could be recognized when they were recorded during natural narration. Our actors were free to use their entire body, face, and voice to express emotions, but our resulting visual stimuli used only the upper body motion trajectories in the form of animated stick figures. Observers were asked to perform an emotion recognition task on short motion sequences using a large and balanced set of emotions (amusement, joy, pride, relief, surprise, anger, disgust, fear, sadness, shame, and neutral). Even with only upper body motion available, our results show recognition accuracy significantly above chance level and high consistency rates among observers. In our first experiment, which used a more classic emotion induction setup, all emotions were well recognized. In the second study, which employed narrations, four basic emotion categories (joy, anger, fear, and sadness), three non-basic emotion categories (amusement, pride, and shame) and the “neutral” category were recognized above chance. Interestingly, especially in the second experiment, observers showed a bias toward anger when recognizing the motion sequences for emotions. We discovered that similarities between motion sequences across the emotions along such properties as mean motion speed, number of peaks in the motion trajectory and mean motion span can explain a large percentage of the variation in observers' responses. Overall, our results show that upper body motion is informative for emotion recognition in narrative scenarios.}, web_url = {http://journal.frontiersin.org/Journal/10.3389/fpsyg.2014.00623/pdf}, state = {published}, DOI = {10.3389/fpsyg.2014.00623}, author = {Volkova EP{evolk}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Dodds TJ{dodds}{Department Human Perception, Cognition and Action}; Tesch J{jtesch}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ vonLassbergBMB2014, title = {Intersegmental Eye-Head-Body Interactions during Complex Whole Body Movements}, journal = {PLoS ONE}, year = {2014}, month = {4}, volume = {9}, number = {4}, pages = {1-15}, abstract = {Using state-of-the-art technology, interactions of eye, head and intersegmental body movements were analyzed for the first time during multiple twisting somersaults of high-level gymnasts. With this aim, we used a unique combination of a 16-channel infrared kinemetric system; a three-dimensional video kinemetric system; wireless electromyography; and a specialized wireless sport-video-oculography system, which was able to capture and calculate precise oculomotor data under conditions of rapid multiaxial acceleration. All data were synchronized and integrated in a multimodal software tool for three-dimensional analysis. During specific phases of the recorded movements, a previously unknown eye-head-body interaction was observed. The phenomenon was marked by a prolonged and complete suppression of gaze-stabilizing eye movements, in favor of a tight coupling with the head, spine and joint movements of the gymnasts. 
Potential reasons for these observations are discussed with regard to earlier findings and integrated within a functional model.}, web_url = {http://www.plosone.org/article/fetchObject.action?uri=info%3Adoi%2F10.1371%2Fjournal.pone.0095450&representation=PDF}, state = {published}, DOI = {10.1371/journal.pone.0095450}, EPUB = {e95450}, author = {von Lassberg C; Beykirch KA{kab}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ HeydrichDAHBMB2013_2, title = {Visual capture and the experience of having two bodies: evidence from two different virtual reality techniques}, journal = {Frontiers in Psychology}, year = {2013}, month = {12}, volume = {4}, number = {946}, pages = {1-15}, abstract = {In neurology and psychiatry the detailed study of illusory own body perceptions has suggested close links between bodily processing and self-consciousness. One such illusory own body perception is heautoscopy, where patients have the sensation of being reduplicated and of existing at two or even more locations. In previous experiments, using a video head-mounted display, self-location and self-identification were manipulated by applying conflicting visuo-tactile information. Yet the experienced singularity of the self was not affected, i.e., participants did not experience having multiple bodies or selves. In two experiments presented in this paper, we investigated self-location and self-identification while participants saw two virtual bodies (video-generated in study 1 and 3D computer generated in study 2) that were stroked either synchronously or asynchronously with their own body. In both experiments, we report that self-identification with two virtual bodies was stronger during synchronous stroking. Furthermore, in the video-generated setup with synchronous stroking participants reported a greater feeling of having multiple bodies than in the control conditions. In study 1, but not in study 2, we report that self-location – measured by anterior posterior drift – was significantly shifted towards the two bodies in the synchronous condition only. Self-identification with two bodies, the sensation of having multiple bodies, and the changes in self-location show that the experienced singularity of the self can be studied experimentally. We discuss our data with respect to ownership for supernumerary hands and heautoscopy. 
We finally compare the effects of the video and 3D computer generated head-mounted display technology and discuss the possible benefits of using either technology to induce changes in illusory self-identification with a virtual body.}, web_url = {http://www.frontiersin.org/Journal/DownloadFile.ashx?pdf=1&FileId=21551&articleId=66408&Version=1&ContentTypeId=21&FileName=fpsyg-04-00946.pdf}, state = {published}, DOI = {10.3389/fpsyg.2013.00946}, author = {Heydrich L; Dodds TJ{dodds}{Department Human Perception, Cognition and Action}; Aspell JE; Herbelin B; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Blanke O} } @Article{ vonLassbergRMK2013, title = {Neuromuscular onset succession of high level gymnasts during dynamic leg acceleration phases on high bar}, journal = {Journal of Electromyography and Kinesiology}, year = {2013}, month = {10}, volume = {23}, number = {5}, pages = {1124–1130}, abstract = {In several athletic disciplines there is evidence that, for generating the most effective acceleration of a specific body part, the transfer of momentum should run in a “whip-like” consecutive succession of body parts towards the segment which shall be accelerated most effectively (e.g. the arm in throwing disciplines). This study investigated how this relates to the succession of neuromuscular activation used to induce such “whip-like” leg acceleration in sports like gymnastics, where body position and the momentary rotational axis of movement are changed (e.g. performing giant swings on high bar). The study demonstrates that during different long hang elements performed by 12 high level gymnasts, the succession of neuromuscular activation runs primarily from the bar (punctum fixum) towards the legs (punctum mobile). This demonstrates that the frequently used teaching instruction to first accelerate the legs for a successful realization of such movements, although consistent with the high-level kinematic output, is contradictory to the neuromuscular input patterns used by high-level athletes realizing these skills with high efficiency. Based on these findings, new approaches could be developed for more direct and more adequate teaching methods aimed at an earlier optimization and facilitation of fundamental movement requirements.}, web_url = {http://www.sciencedirect.com/science/article/pii/S1050641113001752}, state = {published}, DOI = {10.1016/j.jelekin.2013.07.006}, author = {von Lassberg C; Rapp W; Mohler B{mohler}{Department Human Perception, Cognition and Action}; Krug J} } @Article{ LinkenaugerLBM2013, title = {Welcome to Wonderland: The Influence of the Size and Shape of a Virtual Hand On the Perceived Size and Shape of Virtual Objects}, journal = {PLoS ONE}, year = {2013}, month = {7}, volume = {8}, number = {7}, pages = {1-16}, abstract = {The notion of body-based scaling suggests that our body and its action capabilities are used to scale the spatial layout of the environment. Here we present four studies supporting this perspective by showing that the hand acts as a metric which individuals use to scale the apparent sizes of objects in the environment. However, to test this, one must be able to manipulate the size and/or dimensions of the perceiver’s hand, which is difficult in the real world due to the impliability of hand dimensions. 
To overcome this limitation, we used virtual reality to manipulate dimensions of participants’ fully-tracked virtual hands to investigate their influence on the perceived size and shape of virtual objects. In a series of experiments, using several measures, we show that individuals’ estimations of the sizes of virtual objects differ depending on the size of their virtual hand in the direction consistent with the body-based scaling hypothesis. Additionally, we found that these effects were specific to participants’ virtual hands rather than another avatar’s hands or a salient familiar-sized object. While these studies provide support for a body-based approach to the scaling of the spatial layout, they also demonstrate the influence of virtual bodies on perception of virtual environments.}, web_url = {http://www.plosone.org/article/fetchObject.action?uri=info%3Adoi%2F10.1371%2Fjournal.pone.0068594&representation=PDF}, state = {published}, DOI = {10.1371/journal.pone.0068594}, EPUB = {e68594}, author = {Linkenauger SA{sally}{Department Human Perception, Cognition and Action}; Leyrer M{leyrer}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Article{ PiryankovadKBM2013, title = {Egocentric distance perception in large screen immersive displays}, journal = {Displays}, year = {2013}, month = {4}, volume = {34}, number = {2}, pages = {153–164}, abstract = {Many scientists have demonstrated that compared to the real world egocentric distances in head-mounted display virtual environments are underestimated. However, distance perception in large screen immersive displays has received less attention. We investigate egocentric distance perception in a virtual office room projected using a semi-spherical, a Max Planck Institute CyberMotion Simulator cabin and a flat large screen immersive display. The goal of our research is to systematically investigate distance perception in large screen immersive displays with commonly used technical specifications. We specifically investigate the role of distance to the target, stereoscopic projection and motion parallax on distance perception. We use verbal reports and blind walking as response measures for the real world experiment. Due to the limited space in the three large screen immersive displays we use only verbal reports as the response measure for the experiments in the virtual environment. Our results show an overall underestimation of distance perception in the large screen immersive displays, while verbal estimates of distances are nearly veridical in the real world. We find that even when providing motion parallax and stereoscopic depth cues to the observer in the flat large screen immersive display, participants estimate the distances to be smaller than intended. Although stereo cues in the flat large screen immersive display do increase distance estimates for the nearest distance, the impact of the stereoscopic depth cues is not enough to result in veridical distance perception. Further, we demonstrate that the distance to the target significantly influences the percent error of verbal estimates in both the real and virtual world. The impact of the distance to the target on the distance judgments is the same in the real world and in two of the used large screen displays, namely, the MPI CyberMotion Simulator cabin and the flat displays. 
However, in the semi-spherical display we observe a significantly different influence of distance to the target on verbal estimates of egocentric distances. Finally, we discuss potential reasons for our results. Based on the findings from our research we give general suggestions that could serve as methods for improving the LSIDs in terms of the accuracy of depth perception and suggest methods to compensate for the underestimation of verbal distance estimates in large screen immersive displays.}, web_url = {http://www.sciencedirect.com/science/article/pii/S0141938213000036}, state = {published}, DOI = {10.1016/j.displa.2013.01.001}, author = {Piryankova IV{ivelina}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}; Kloos U; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Article{ StreuberMBd2012, title = {The Influence of Visual Information on the Motor Control of Table Tennis Strokes}, journal = {Presence}, year = {2012}, month = {9}, volume = {21}, number = {3}, pages = {281-294}, abstract = {Theories of social interaction (i.e., common coding theory) suggest that visual information about the interaction partner is critical for successful interpersonal action coordination. Seeing the interaction partner allows an observer to understand and predict the interaction partner's behavior. However, it is unknown which of the many sources of visual information about an interaction partner (e.g., body, end effectors, and/or interaction objects) are used for action understanding and thus for the control of movements in response to observed actions. We used a novel immersive virtual environment to investigate this further. Specifically, we asked participants to perform table tennis strokes in response to table tennis balls stroked by a virtual table tennis player. We tested the effect of the visibility of the ball, the paddle, and the body of the virtual player on task performance and movement kinematics. Task performance was measured as the minimum distance between the center of the paddle and the center of the ball (radial error). Movement kinematics was measured as variability in the paddle speed of repeatedly executed table tennis strokes (stroke speed variability). We found that radial error was reduced when the ball was visible compared to invisible. However, seeing the body and/or the racket of the virtual players only reduced radial error when the ball was invisible. There was no influence of seeing the ball on stroke speed variability. However, we found that stroke speed variability was reduced when either the body or the paddle of the virtual player was visible. Importantly, the differences in stroke speed variability were largest in the moment when the virtual player hit the ball. This suggests that seeing the virtual player's body or paddle was important for preparing the stroke response. 
These results demonstrate for the first time that the online control of arm movements is coupled with visual body information about an opponent.}, web_url = {http://www.mitpressjournals.org/doi/abs/10.1162/PRES_a_00113}, state = {published}, DOI = {10.1162/PRES_a_00113}, author = {Streuber S{stst}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}} } @Article{ NethSEKBM2011_2, title = {Velocity-Dependent Dynamic Curvature Gain for Redirected Walking}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2012}, month = {7}, volume = {18}, number = {7}, pages = {1041-1052}, abstract = {Redirected walking techniques allow people to walk in a larger virtual space than the physical extents of the laboratory. We describe two experiments conducted to investigate human sensitivity to walking on a curved path and to validate a new redirected walking technique. In a psychophysical experiment, we found that sensitivity to walking on a curved path was significantly lower for slower walking speeds (radius of 10 m versus 22 m). In an applied study, we investigated the influence of a velocity-dependent dynamic gain controller and an avatar controller on the average distance that participants were able to freely walk before needing to be reoriented. The mean walked distance was significantly greater in the dynamic gain controller condition, as compared to the static controller (22 m versus 15 m). Our results demonstrate that perceptually motivated dynamic redirected walking techniques, in combination with reorientation techniques, allow for unaided exploration of a large virtual city model.}, file_url = {fileadmin/user_upload/files/publications/2011/TVCG_Neth_Manuscript_revised.pdf}, web_url = {http://www.computer.org/portal/web/csdl/doi/10.1109/TVCG.2011.275}, state = {published}, DOI = {10.1109/TVCG.2011.275}, author = {Neth CT{neth}{Department Human Perception, Cognition and Action}; Souman JL{souman}{Department Human Perception, Cognition and Action}; Engel D{engel}{Department Human Perception, Cognition and Action}; Kloos U; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Article{ FrankensteinMBM2011, title = {Is the Map in Our Head Oriented North?}, journal = {Psychological Science}, year = {2012}, month = {2}, volume = {23}, number = {2}, pages = {120-125}, abstract = {We examined how a highly familiar environmental space—one’s city of residence—is represented in memory. Twenty-six participants faced a photo-realistic virtual model of their hometown and completed a task in which they pointed to familiar target locations from various orientations. Each participant’s performance was most accurate when he or she was facing north, and errors increased as participants’ deviation from a north-facing orientation increased. Pointing errors and latencies were not related to the distance between participants’ initial locations and the target locations. Our results are inconsistent with accounts of orientation-free memory and with theories assuming that the storage of spatial knowledge depends on local reference frames. 
Although participants recognized familiar local views in their initial locations, their strategy for pointing relied on a single, north-oriented reference frame that was likely acquired from maps rather than experience from daily exploration. Even though participants had spent significantly more time navigating the city than looking at maps, their pointing behavior seemed to rely on a north-oriented mental map.}, file_url = {fileadmin/user_upload/files/publications/2012/Psychol-Sci-2012-Frankenstein.pdf}, web_url = {http://pss.sagepub.com/content/23/2/120.full.pdf+html}, state = {published}, DOI = {10.1177/0956797611429467}, author = {Frankenstein J{frankenstein}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Meilinger T{meilinger}{Department Human Perception, Cognition and Action}} } @Article{ LinkenaugerMP2011, title = {Body-based perceptual rescaling revealed through the size-weight illusion}, journal = {Perception}, year = {2011}, month = {10}, volume = {40}, number = {10}, pages = {1251-1253}, abstract = {An embodied approach to the perception of spatial layout contends that the body is used as a ‘perceptual ruler’ with which individuals scale the perceived environmental layout. In support of this notion, previous research has shown that the perceived size of objects can be influenced by changes in the apparent size of the hand. The size–weight illusion is a well-known phenomenon, which occurs when people lift two objects of equal weight but differing sizes and perceive that the larger object feels lighter. Therefore, if apparent hand size influences perceived object size, it should also influence the object’s perceived weight. In this study, we investigated this possibility by using perceived weight as a measure and found that changes in the apparent size of the hand influence objects’ perceived weight.}, web_url = {http://www.perceptionweb.com/perception/fulltext/p40/p7049.pdf}, state = {published}, DOI = {10.1068/p7049}, author = {Linkenauger SA{sally}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Proffitt DR} } @Article{ DoddsMB2011, title = {Talk to the Virtual Hands: Self-Animated Avatars Improve Communication in Head-Mounted Display Virtual Environments}, journal = {PLoS One}, year = {2011}, month = {10}, volume = {6}, number = {10}, pages = {1-12}, abstract = {Background When we talk to one another face-to-face, body gestures accompany our speech. Motion tracking technology enables us to include body gestures in avatar-mediated communication, by mapping one's movements onto one's own 3D avatar in real time, so the avatar is self-animated. We conducted two experiments to investigate (a) whether head-mounted display virtual reality is useful for researching the influence of body gestures in communication; and (b) whether body gestures are used to help in communicating the meaning of a word. Participants worked in pairs and played a communication game, where one person had to describe the meanings of words to the other. Principal Findings In experiment 1, participants used significantly more hand gestures and successfully described significantly more words when nonverbal communication was available to both participants (i.e. both describing and guessing avatars were self-animated, compared with both avatars in a static neutral pose). 
Participants ‘passed’ (gave up describing) significantly more words when they were talking to a static avatar (no nonverbal feedback available). In experiment 2, participants' performance was significantly worse when they were talking to an avatar with a prerecorded listening animation, compared with an avatar animated by their partners' real movements. In both experiments participants used significantly more hand gestures when they played the game in the real world. Conclusions Taken together, the studies show how (a) virtual reality can be used to systematically study the influence of body gestures; (b) it is important that nonverbal communication is bidirectional (real nonverbal feedback in addition to nonverbal communication from the describing participant); and (c) there are differences in the amount of body gestures that participants use with and without the head-mounted display, and we discuss possible explanations for this and ideas for future investigation.}, file_url = {fileadmin/user_upload/files/publications/2011/Dodds-TalktotheVirtualHands-PLoSOne-2011.pdf}, web_url = {http://www.plosone.org/article/fetchObjectAttachment.action?uri=info%3Adoi%2F10.1371%2Fjournal.pone.0025759&representation=PDF}, state = {published}, DOI = {10.1371/journal.pone.0025759}, EPUB = {e25759}, author = {Dodds TJ{dodds}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ RuddleVMB2011, title = {The effect of landmark and body-based sensory information on route knowledge}, journal = {Memory & Cognition}, year = {2011}, month = {5}, volume = {39}, number = {4}, pages = {686-699}, abstract = {Two experiments investigated the effects of landmarks and body-based information on route knowledge. Participants made four out-and-back journeys along a route, guided only on the first outward trip and with feedback every time an error was made. Experiment 1 used 3-D virtual environments (VEs) with a desktop monitor display, and participants were provided with no supplementary landmarks, only global landmarks, only local landmarks, or both global and local landmarks. Local landmarks significantly reduced the number of errors that participants made, but global landmarks did not. Experiment 2 used a head-mounted display; here, participants who physically walked through the VE (translational and rotational body-based information) made 36% fewer errors than did participants who traveled by physically turning but changing position using a joystick. 
Overall, the experiments showed that participants were less sure of where to turn than which way to turn, and journey direction interacted with sensory information to affect the number and types of errors participants made.}, web_url = {http://www.springerlink.com/content/12771128x0716033/fulltext.pdf}, state = {published}, DOI = {10.3758/s13421-010-0054-z}, author = {Ruddle RA{roy}; Volkova E{evolk}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ 6123, title = {The Effect of Viewing a Self-Avatar on Distance Judgments in an HMD-Based Virtual Environment}, journal = {Presence: Teleoperators and Virtual Environments}, year = {2010}, month = {6}, volume = {19}, number = {3}, pages = {230-242}, abstract = {Few HMD-based virtual environment systems display a rendering of the user’s own body. Subjectively, this often leads to a sense of disembodiment in the virtual world. We explore the effect of being able to see one’s own body in such systems on an objective measure of the accuracy of one form of space perception. Using an action-based response measure, we found that participants who explored near space while seeing a fully-articulated and tracked visual representation of themselves subsequently made more accurate judgments of absolute egocentric distance to locations ranging from 4m to 6m away from where they were standing than did participants who saw no avatar. A non-animated avatar also improved distance judgments, but by a lesser amount. Participants who viewed either animated or static avatars positioned 3m in front of their own position made subsequent distance judgments with similar accuracy to the participants who viewed the equivalent animated or static avatar positioned at their own location. We discuss the implications of these results for theories of embodied perception in virtual environments.}, web_url = {http://www.mitpressjournals.org/doi/pdf/10.1162/pres.19.3.230}, state = {published}, DOI = {10.1162/pres.19.3.230}, author = {Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Creem-Regehr SH; Thompson WB{wthompson}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ 5515, title = {Imagined Self-Motion Differs from Perceived Self-Motion: Evidence from a Novel Continuous Pointing Method}, journal = {PLoS One}, year = {2009}, month = {11}, volume = {4}, number = {11}, pages = {1-11}, abstract = {Background The extent to which actual movements and imagined movements maintain a shared internal representation has been a matter of much scientific debate. Of the studies examining such questions, few have directly compared actual full-body movements to imagined movements through space. Here we used a novel continuous pointing method to a) provide a more detailed characterization of self-motion perception during actual walking and b) compare the pattern of responding during actual walking to that which occurs during imagined walking. Methodology/Principal Findings This continuous pointing method requires participants to view a target and continuously point towards it as they walk, or imagine walking past it along a straight, forward trajectory. By measuring changes in the pointing direction of the arm, we were able to determine participants' perceived/imagined location at each moment during the trajectory and, hence, perceived/imagined self-velocity during the entire movement.
The specific pattern of pointing behaviour that was revealed during sighted walking was also observed during blind walking. Specifically, a peak in arm azimuth velocity was observed upon target passage and a strong correlation was observed between arm azimuth velocity and pointing elevation. Importantly, this characteristic pattern of pointing was not consistently observed during imagined self-motion. Conclusions/Significance Overall, the spatial updating processes that occur during actual self-motion were not evidenced during imagined movement. Because of the rich description of self-motion perception afforded by continuous pointing, this method is expected to have significant implications for several research areas, including those related to motor imagery and spatial cognition and to applied fields for which mental practice techniques are common (e.g. rehabilitation and athletics).}, web_url = {http://www.plosone.org/article/fetchObjectAttachment.action;jsessionid=7EC338C2BF98904DAB7CCEA3B5344BA6.ambra02?uri=info%3Adoi%2F10.1371%2Fjournal.pone.0007793&representation=PDF}, state = {published}, DOI = {10.1371/journal.pone.0007793}, EPUB = {e7793}, author = {Campos JL{camposjl}{Department Human Perception, Cognition and Action}; Siegle JH{jsiegle}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Loomis JM{loomis}} } @Article{ 5516, title = {Measurement of instantaneous perceived self-motion using continuous pointing}, journal = {Experimental Brain Research}, year = {2009}, month = {5}, volume = {195}, number = {3}, pages = {429-444}, abstract = {In order to optimally characterize full-body self-motion perception during passive translations, changes in perceived location, velocity, and acceleration must be quantified in real time and with high spatial resolution. Past methods have failed to effectively measure these critical variables. Here, we introduce continuous pointing as a novel method with several advantages over previous methods. Participants point continuously to the mentally updated location of a previously viewed target during passive, full-body movement. High-precision motion-capture data of arm angle provide a measure of a participant’s perceived location and, in turn, perceived velocity at every moment during a motion trajectory. In two experiments, linear movements were presented in the absence of vision by passively translating participants with a robotic wheelchair or an anthropomorphic robotic arm (MPI Motion Simulator). The movement profiles included constant-velocity trajectories, two successive movement intervals separated by a brief pause, and reversed-motion trajectories.
Results indicate a steady decay in perceived velocity during constant-velocity travel and an attenuated response to mid-trial accelerations.}, web_url = {http://springerlink.metapress.com/content/t0625668v1651t45/fulltext.pdf}, state = {published}, DOI = {10.1007/s00221-009-1805-6}, author = {Siegle JH{jsiegle}; Campos JL{camposjl}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Loomis JM{loomis}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ vanLiereM2009, title = {Foreword to: Special issue on virtual environments}, journal = {Computers & Graphics}, year = {2009}, month = {4}, volume = {33}, number = {2}, pages = {119}, abstract = {The usage of virtual environments (VEs) in various application fields has significantly increased in recent years. The explosive advancement of commodity graphics hardware, immersive displays and tracking technology allows for the exploration of new approaches in the design of immersive interfaces which increase the level of engagement between the user and the simulated environment. The Eurographics Symposium on Virtual Environments is a forum for dissemination of the latest research results in VEs. The 14th EGVE symposium was held in Eindhoven, The Netherlands on May 29–30, 2008. This issue contains revised and expanded versions of the three best papers selected from the Proceedings of EGVE 2008. One of the challenges in designing effective immersive interfaces is the representation of body position/orientation in the environment and an accurate updating of this representation when the body–environment relationship changes. The paper “Influence of the size of the field-of-view on motion perception” by Pretto et al. presents two experiments that aim to determine the effect of field-of-view on the effectiveness of optical flow for estimation of the perceived amplitude of rotations about the body vertical axis and the perceived speed of forward translations. Results are reported that suggest that a large field-of-view is not required to estimate the amplitude of visual rotations about the vertical axis of the body, whereas fields-of-view of at least 60° are advisable when speed perception relies on optic flow information. The second paper, “Using Mobile Group Dynamics and Virtual Time to improve teamwork in large-scale Collaborative Virtual Environments” by Dodds et al. addresses how interfaces of Collaborative Virtual Environments can be improved for synchronous and asynchronous collaboration. The authors investigate how teleporting and virtual time affect human behavior in task- and team-specific goals. They found that virtual time stimulated communication between the participants. They also found that their multiple views and teleporting functionality did increase communication and allowed for an increase of the distance between participants. It has been said that there is no silver bullet in motion tracking. Indeed, choosing the right motion tracker entails a trade-off between different requirements by VE designers. The third paper, “A Simulator-based Approach to Evaluating Optical Trackers” by Smit et al. proposes a simulation framework to evaluate the performance of model-based optical trackers. The framework can be used to evaluate and compare the performance of different trackers under various conditions. These input conditions can model important aspects, such as the interaction task, input device geometry, camera properties and occlusion.
In this way, designers of VEs can use the framework to study various conditions affecting optical tracking performance. This special issue brings together original research and contributions in VEs. We expect that it will generate novel and exciting ideas to further this field and sincerely hope you enjoy it.}, web_url = {http://www.sciencedirect.com/science/article/pii/S0097849309000181?via%3Dihub}, state = {published}, DOI = {10.1016/j.cag.2009.02.001}, author = {van Liere R; Mohler B{mohler}{Department Human Perception, Cognition and Action}} } @Article{ 5583, title = {Circular, linear, and curvilinear vection in a large-screen virtual environment with floor projection}, journal = {Computers and Graphics}, year = {2009}, month = {2}, volume = {33}, number = {1}, pages = {47-58}, abstract = {Vection is defined as the compelling sensation of illusory self-motion elicited by a moving sensory, usually visual, stimulus. This paper presents collected introspective data, user discomfort and perceived speed data for the experience of linear, circular, and curvilinear vection in a large-screen, immersive, virtual environment. As a first step we evaluated the effectiveness of a floor projection on the perception of vection for four trajectories: linear forward, linear backward, circular left, and circular right. The floor projection, which considerably extended the field of view, was found to significantly improve the introspective measures of linear, but not circular, vection experienced in a photo-realistic three-dimensional town. In a second study we investigated the differences between 12 different motion trajectories on the illusion of self-motion. In this study we found that linear translations to the left and right are perceived as the least convincing, while linear down is perceived as the most convincing of the linear trajectories. Second, we found that while linear forward vection is not perceived to be very convincing, curvilinear forward vection is reported to be as convincing as circular vection. In a third and final experiment we investigated the perceived speed for all different trajectories and acquired data based on simulator sickness questionnaires to compute a discomfort factor associated with each type of trajectory. Considering our experimental results, we offer suggestions for increasing the sense of self-motion in simulators and VE applications, specifically to increase the number of curvilinear trajectories (as opposed to linear ones) and, if possible, add floor projection in order to improve the illusory sense of self-motion.}, web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6TYG-4V3SY6B-1-1M&_cdi=5618&_user=29041&_orig=browse&_coverDate=02%2F28%2F2009&_sk=999669998&view=c&wchp=dGLbVzz-zSkzV&md5=1d4}, state = {published}, DOI = {10.1016/j.cag.2008.11.008}, author = {Trutoiu LC{auract}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Schulte-Pelkum J{jsp}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Article{ 4416, title = {Visual flow influences gait transition speed and preferred walking speed}, journal = {Experimental Brain Research}, year = {2007}, month = {8}, volume = {181}, number = {2}, pages = {221-228}, abstract = {It is typically assumed that basic features of human gait are determined by purely biomechanical factors. 
In two experiments, we test whether gait transition speed and preferred walking speed are also influenced by visual information about the speed of self-motion. The visual flow during treadmill locomotion was manipulated to be slower than, matched to, or faster than the physical gait speed (visual gains of 0.5, 1.0, 2.0). Higher flow rates elicit significantly lower transition speeds for both the Walk-Run and Run-Walk transition, as expected. Similarly, higher flow rates elicit significantly lower preferred walking speeds. These results suggest that visual information becomes calibrated to mechanical or energetic aspects of gait and contributes to the control of locomotor behavior.}, web_url = {http://www.springerlink.com/content/g3w030x75244x072/fulltext.pdf}, state = {published}, DOI = {10.1007/s00221-007-0917-0}, author = {Mohler BJ{mohler}; Thompson WB; Creem-Regehr SH; Pick HL; Warren WH} } @Article{ 4546, title = {Calibration of locomotion resulting from visual motion in a treadmill-based virtual environment}, journal = {ACM Transactions on Applied Perception}, year = {2007}, month = {1}, volume = {4}, number = {1}, pages = {20-32}, abstract = {This paper describes the use of a treadmill-based virtual environment (VE) to investigate the influence of visual motion on locomotion. First, we establish that a computer-controlled treadmill coupled with a wide field of view computer graphics display can be used to study interactions between perception and action. Previous work has demonstrated that humans recalibrate their visually directed actions to changing circumstances in their environment. Using a treadmill VE, we show that recalibration of action is reflected in the real world as a result of manipulating the relation between the visual indication of speed, presented using computer graphics, and the biomechanical speed of walking on a treadmill. We then extend this methodology to investigate whether the recalibration is based on perception of the speed of movement through the world or on the magnitude of optic flow itself. This was done by utilizing two different visual displays, which had essentially the same magnitude of optic flow, but which differed in the information present for the speed of forward motion. These results indicate that changes in optic flow are not necessary for recalibration to occur. The recalibration effect is dependent, at least in part, on visual perception of the speed of self-movement.}, web_url = {http://dl.acm.org/citation.cfm?id=1227138}, state = {published}, DOI = {10.1145/1227134.1227138}, author = {Mohler BJ{mohler}; Thompson WB; Creem-Regehr SH; Willemsen P; Pick HL; Rieser JJ} } @Inproceedings{ FlemingMRBB2016, title = {Appealing Avatars from 3D Body Scans: Perceptual Effects of Stylization}, year = {2017}, month = {8}, pages = {175-196}, abstract = {Advances in 3D scanning technology allow us to create realistic virtual avatars from full body 3D scan data. However, negative reactions to some realistic computer generated humans suggest that this approach might not always provide the most appealing results. Using styles derived from existing popular character designs, we present a novel automatic stylization technique for body shape and colour information based on a statistical 3D model of human bodies. We investigate whether such stylized body shapes result in increased perceived appeal with two different experiments: One focuses on body shape alone, the other investigates the additional role of surface colour and lighting.
Our results consistently show that the most appealing avatar is a partially stylized one. Importantly, avatars with high stylization or no stylization at all were rated to have the least appeal. The inclusion of colour information and improvements to render quality had no significant effect on the overall perceived appeal of the avatars, and we observe that the body shape primarily drives the change in appeal ratings. For body scans with colour information, we found that a partially stylized avatar was most effective, increasing average appeal ratings by approximately 34%.}, web_url = {https://link.springer.com/chapter/10.1007/978-3-319-64870-5_9}, editor = {Braz, J. , N. Magnenat-Thalmann, P. Richard, L. Linsen, A. Telea, S. Battiato, F. Imai}, publisher = {Springer International Publishing}, address = {Cham, Switzerland}, series = {Communications in Computer and Information Science ; 693}, booktitle = {Computer Vision, Imaging and Computer Graphics: Theory and Applications - 11th International Joint Conference, VISIGRAPP 2016}, event_name = {11th Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications (VISIGRAPP 2016)}, event_place = {Roma, Italy}, state = {published}, ISBN = {978-3-319-64869-9}, DOI = {10.1007/978-3-319-64870-5_9}, author = {Fleming R{rfleming}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Romero J{jromero}; Black MJ{black}; Breidt M{mbreidt}{Department Human Perception, Cognition and Action}} } @Inproceedings{ KeilmanndSCMBM2017, title = {Comparing Individual and Collaborative Problem Solving in Environmental Search}, year = {2017}, month = {7}, pages = {650}, abstract = {Collaborative spatial problem solving is an important yet not thoroughly examined task. Participants navigated individually and in dyads through virtual cities of varying complexity. They only saw the part of the environment visible from their current location, shown from a bird’s-eye-view map perspective. We recorded missed target locations, overall trajectory length and search time per person until they indicated full coverage. Our results show a general increase in missed locations, trajectory length, and search time with the complexity of the environment. These increases differed between individual and collaborative search. For complex, but not for simple environments, individual participants navigated shorter distances, finished earlier, but also missed more target locations than when searching the same environments in collaboration. These results indicate that in complex environments collaborative search is less error-prone than individual search, but takes longer. Such initial findings will constrain future theorizing about collaborative spatial problem solving.}, web_url = {https://mindmodeling.org/cogsci2017/}, editor = {Gunzelmann, G. , A. Howes, T. Tenbrink, E.
Davelaar}, publisher = {Cognitive Science Society}, address = {Austin, TX, USA}, booktitle = {Computational Foundations of Cognition}, event_name = {39th Annual Meeting of the Cognitive Science Society (CogSci 2017)}, event_place = {London, UK}, state = {published}, ISBN = {978-0-9911967-6-0}, author = {Keilmann F{fkeilmann}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}; Schwan S; Cress U; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Meilinger T{meilinger}{Department Human Perception, Cognition and Action}} } @Inproceedings{ SoykaLSFRM2016, title = {Enhancing stress management techniques using virtual reality}, year = {2016}, month = {7}, pages = {85-88}, abstract = {Chronic stress is one of the major problems in our current fast paced society. The body reacts to environmental stress with physiological changes (e.g. accelerated heart rate), increasing the activity of the sympathetic nervous system. Normally the parasympathetic nervous system should bring us back to a more balanced state after the stressful event is over. However, nowadays we are often under constant pressure, with a multitude of stressful events per day, which can result in us constantly being out of balance. This highlights the importance of effective stress management techniques that are readily accessible to a wide audience. In this paper we present an exploratory study investigating the potential use of immersive virtual reality for relaxation with the purpose of guiding further design decisions, especially about the visual content as well as the interactivity of virtual content. Specifically, we developed an underwater world for head-mounted display virtual reality. We performed an experiment to evaluate the effectiveness of the underwater world environment for relaxation, as well as to evaluate if the underwater world in combination with breathing techniques for relaxation was preferred to standard breathing techniques for stress management. The underwater world was rated as more fun and more likely to be used at home than a traditional breathing technique, while providing a similar degree of relaxation.}, web_url = {http://dl.acm.org/citation.cfm?id=2931017}, editor = {Jain, E. , S. Joerg}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {ACM Symposium on Applied Perception (SAP '16)}, event_place = {Anaheim, CA, USA}, state = {published}, ISBN = {978-1-4503-4383-1}, DOI = {10.1145/2931002.2931017}, author = {Soyka F{fsoyka}{Department Human Perception, Cognition and Action}; Leyrer M{leyrer}{Department Human Perception, Cognition and Action}; Smallwood J{jsmallwood}{Department Human Perception, Cognition and Action}; Ferguson C{cferguson}{Department Human Perception, Cognition and Action}; Riecke BE{bernie}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Inproceedings{ FlemingMRBB2016_2, title = {Appealing Female Avatars from 3D Body Scans: Perceptual Effects of Stylization}, year = {2016}, month = {2}, pages = {335-345}, abstract = {Advances in 3D scanning technology allow us to create realistic virtual avatars from full body 3D scan data. However, negative reactions to some realistic computer generated humans suggest that this approach might not always provide the most appealing results. 
Using styles derived from existing popular character designs, we present a novel automatic stylization technique for body shape and colour information based on a statistical 3D model of human bodies. We investigate whether such stylized body shapes result in increased perceived appeal with two different experiments: One focuses on body shape alone, the other investigates the additional role of surface colour and lighting. Our results consistently show that the most appealing avatar is a partially stylized one. Importantly, avatars with high stylization or no stylization at all were rated to have the least appeal. The inclusion of colour information and improvements to render quality had no significant effect on the overall perceived appeal of the avatars, and we observe that the body shape primarily drives the change in appeal ratings. For body scans with colour information, we found that a partially stylized avatar was most effective, increasing average appeal ratings by approximately 34%.}, file_url = {fileadmin/user_upload/files/publications/2016/GRAPP-2016-Fleming.pdf}, web_url = {http://dl.acm.org/citation.cfm?id=3021639}, editor = {Magnenat-Thalmann, N. , P. Richard, L. Linsen, A. Telea, S. Battiato, F. Imai, J. Braz}, publisher = {Scitepress}, address = {Setúbal, Portugal}, event_name = {11th Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications (VISIGRAPP 2016)}, event_place = {Roma, Italy}, state = {published}, ISBN = {978-989-758-175-5}, DOI = {10.5220/0005683903330343}, author = {Fleming R{rfleming}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Romero J{jromero}; Black MJ{black}; Breidt M{mbreidt}{Department Human Perception, Cognition and Action}} } @Inproceedings{ RauhoftLTKSM2015, title = {Evoking and Assessing Vastness in Virtual Environments}, year = {2015}, month = {9}, pages = {51-54}, abstract = {Many have experienced vastness, the feeling when the visual space seems to extend without limits away from you, making you feel like a small element within the space. For over 200 years, people have been writing about this experience, for example stating that vastness is important to the experience of awe [Mikulak 2015]. Yet vastness has received little attention in empirical research. Specifically, it is unknown which aspects of the visual stimulus contribute to perceived vastness. This may be due to the inherent difficulties in presenting a variety of vast stimuli while varying only specific visual cues. Using virtual reality addresses these difficulties, as this technology provides precise control over the presented visual stimuli. Here we investigate whether the feeling of vastness can be evoked using virtual reality and explore potential objective measures to assess vastness. We used three different measures during this experiment: 1) An avatar height adjustment task where participants had to adjust an avatar to be equivalent to their own height as viewed from a distance, 2) a distance estimation task and 3) a subjective vastness rating task. These tasks were performed in four environments: a plain (used in all subsequent environments for the ground and sky surfaces), a forest, a mountain and the mountain and forest environments combined. Our results indicate that the feeling of vastness can indeed be experienced to various degrees in virtual environments, demonstrating the potential of VR as a tool for exploring the perception of vastness. 
Taken together, however, the results suggest that the percept of vastness is a rather complex construct.}, web_url = {http://dl.acm.org/citation.cfm?id=2804425}, editor = {Trutoiu, L. , M. Geuss, S. Kull, B. Sanders, R. Mantiuk}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {ACM SIGGRAPH Symposium on Applied Perception (SAP '15)}, event_place = {Tübingen, Germany}, state = {published}, ISBN = {978-1-4503-3812-7}, DOI = {10.1145/2804408.2804425}, author = {Rauh\"oft G{grauhoeft}; Leyrer M{leyrer}; Thompson W; Klatzky R{bobby}; Stefanucci J{jstefanucci}; Mohler B{mohler}} } @Inproceedings{ WellerdiekBGSKBM2015, title = {Perception of Strength and Power of Realistic Male Characters}, year = {2015}, month = {9}, pages = {7-14}, abstract = {We investigated the influence of body shape and pose on the perception of physical strength and social power for male virtual characters. In the first experiment, participants judged the physical strength of varying body shapes, derived from a statistical 3D body model. Based on these ratings, we determined three body shapes (weak, average, and strong) and animated them with a set of power poses for the second experiment. Participants rated how strong or powerful they perceived virtual characters of varying body shapes that were displayed in different poses. Our results show that perception of physical strength was mainly driven by the shape of the body. However, the social attribute of power was influenced by an interaction between pose and shape. Specifically, the effect of pose on power ratings was greater for weak body shapes. These results demonstrate that a character with a weak shape can be perceived as more powerful when in a high-power pose.}, file_url = {fileadmin/user_upload/files/publications/2015/SAP-2015-Wellerdiek.pdf}, web_url = {http://dl.acm.org/citation.cfm?id=2804413}, editor = {Trutoiu, L. , M. Geuss, S. Kull, B. Sanders, R. Mantiuk}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {ACM SIGGRAPH Symposium on Applied Perception (SAP '15)}, event_place = {Tübingen, Germany}, state = {published}, ISBN = {978-1-4503-3812-7}, DOI = {10.1145/2804408.2804413}, author = {Wellerdiek AC{awellerdiek}{Department Human Perception, Cognition and Action}; Breidt M{mbreidt}{Department Human Perception, Cognition and Action}; Geuss MN{mgeuss}{Department Human Perception, Cognition and Action}; Streuber S{stst}{Department Human Perception, Cognition and Action}; Kloos U; Black MJ; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Inproceedings{ SoykaLKBSM2015, title = {Turbulent Motions Cannot Shake VR}, year = {2015}, month = {3}, day = {25}, pages = {33-40}, abstract = {The International Air Transport Association forecasts that there will be at least a 30% increase in passenger demand for flights over the next five years. In these circumstances the aircraft industry is looking for new ways to keep passengers occupied, entertained and healthy, and one of the methods under consideration is immersive virtual reality. It is therefore becoming important to understand how motion sickness and presence in virtual reality are influenced by physical motion. We were specifically interested in the use of head-mounted displays (HMD) while experiencing in-flight motions such as turbulence.
50 people were tested in different virtual environments varying in their context (virtual airplane versus magic carpet ride over tropical islands) and the way the physical motion was incorporated into the virtual world (matching visual and auditory stimuli versus no incorporation). Participants were subjected to three brief periods of turbulent motions realized with a motion simulator. Physiological signals (postural stability, heart rate and skin conductance) as well as subjective experiences (sickness and presence questionnaires) were measured. None of our participants experienced severe motion sickness during the experiment and although there were only small differences between conditions, we found indications that it is beneficial for both wellbeing and presence to choose a virtual environment in which turbulent motions could be plausible and perceived as part of the scenario. Therefore we can conclude that brief exposure to turbulent motions does not make participants sick.}, web_url = {http://ieeevr.org/2015/?q=node/47}, editor = {Höllerer, T. , V. Interrante, A. Lecuyer, J.E. Swan II}, publisher = {IEEE}, address = {Piscataway, NJ, USA}, event_name = {IEEE Virtual Reality (VR 2015)}, event_place = {Arles, France}, state = {published}, ISBN = {978-1-4799-1727-3}, DOI = {10.1109/VR.2015.7223321}, author = {Soyka F{fsoyka}{Department Human Perception, Cognition and Action}; Kokkinara E{ekokkinara}{Department Human Perception, Cognition and Action}; Leyrer M{leyrer}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Slater M; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Inproceedings{ PaulM2015, title = {Animated self-avatars in immersive virtual reality for studying body perception and distortions}, year = {2015}, month = {3}, pages = {1-2}, abstract = {So far in my research studies with virtual reality I have focused on using body and hand motion tracking systems in order to animate different 3D self-avatars in immersive virtual reality environments (head-mounted displays or desktop virtual reality). We are using self-avatars to explore the following basic research question: what sensory information is used to perceive one's body dimensions? And the applied question of how we can best create a calibrated self-avatar for efficient use in first-person immersive head-mounted display interaction scenarios. The self-avatar used for such research questions and applications has to be precise, easy to use and enable the virtual hand and body to interact with physical objects. This is what my research has focused on thus far and what I am developing for the completion of my first year of my graduate studies. We plan to use Leap Motion for hand and arm movements, the Moven Inertial Measurement suit for full body tracking, and the Oculus DK2 head-mounted display. A several-step process of setting up and calibrating an animated self-avatar with full body motion and hand tracking is described in this paper. First, the user’s dimensions will be measured, they will be given a self-avatar with these dimensions, then they will be asked to perform pre-determined actions (i.e.
touching objects, walking in a specific trajectory), then we will estimate in real time how precise the animated body and body parts are relative to the real-world reference objects, and finally a scaling of the avatar size or retargeting of the motion is performed in order to meet a specific minimum error requirement.}, web_url = {https://www.researchgate.net/publication/284437866_Animated_self-avatars_in_immersive_virtual_reality_for_studying_body_perception_and_distortions}, event_name = {IEEE VR Doctoral Consortium 2015}, event_place = {Arles, France}, state = {published}, author = {Paul S{spaul}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}} } @Inproceedings{ GutekunstGSKM2014, title = {A Video Self-avatar Influences the Perception of Heights in an Augmented Reality Oculus Rift}, year = {2014}, month = {12}, day = {9}, pages = {9-12}, abstract = {This paper compares the influence that a video self-avatar and the lack of a visual body representation have on height estimation when standing at a virtual visual cliff. A height estimation experiment was conducted using a custom augmented reality Oculus Rift hardware and software prototype, also described in this paper. The results are consistent with previous research demonstrating that the presence of a visual body influences height estimates, just as it has been shown to influence distance estimates and affordance estimates.}, web_url = {http://diglib.eg.org/handle/10.2312/ve.20141358.009-012}, editor = {Nojima, T. , D. Reiners, O. Staadt}, publisher = {Eurographics Association}, address = {Aire-la-Ville, Switzerland}, event_name = {International Conference on Artificial Reality and Telexistence, 19th Eurographics Symposium on Virtual Environments (ICAT-EGVE 2014)}, event_place = {Bremen, Germany}, state = {published}, ISBN = {978-3-905674-65-1}, DOI = {10.2312/ve.20141358}, author = {Gutekunst M; Geuss M{mgeuss}{Department Human Perception, Cognition and Action}; Rauhoeft G{grauhoeft}{Department Human Perception, Cognition and Action}; Stefanucci JK{jstefanucci}{Department Human Perception, Cognition and Action}; Kloos U; Mohler B{mohler}{Department Human Perception, Cognition and Action}} } @Inproceedings{ PiryankovaSRdBM2014, title = {Can I recognize my body's weight? The influence of shape and texture on the perception of self}, journal = {ACM Transactions on Applied Perception}, year = {2014}, month = {8}, day = {9}, volume = {11}, number = {3}, pages = {1-18}, abstract = {The goal of this research was to investigate women’s sensitivity to changes in their perceived weight by altering the body mass index (BMI) of the participants’ personalized avatars displayed on a large-screen immersive display. We created the personalized avatars with a full-body 3D scanner that records both the participants’ body geometry and texture. We altered the weight of the personalized avatars to produce changes in BMI while keeping height, arm length and inseam fixed and exploited the correlation between body geometry and anthropometric measurements encapsulated in a statistical body shape model created from thousands of body scans. In a 2x2 psychophysical experiment, we investigated the relative importance of visual cues, namely shape (own shape vs.
an average female body shape with equivalent height and BMI to the participant) and texture (own photo-realistic texture or checkerboard pattern texture) on the ability to accurately perceive own current body weight (by asking them ‘Is the avatar the same weight as you?’). Our results indicate that shape (where height and BMI are fixed) had little effect on the perception of body weight. Interestingly, the participants perceived their body weight veridically when they saw their own photo-realistic texture and significantly underestimated their body weight when the avatar had a checkerboard patterned texture. The range that the participants accepted as their own current weight was approximately a 0.83 to −6.05 BMI% change tolerance range around their perceived weight. Both the shape and the texture had an effect on the reported similarity of the body parts and the whole avatar to the participant’s body. This work has implications for new measures for patients with body image disorders, as well as researchers interested in creating personalized avatars for games, training applications or virtual reality.}, web_url = {http://sap2014.cs.mtu.edu/schedule.php}, event_name = {ACM Symposium on Applied Perception (SAP '14)}, event_place = {Vancouver, Canada}, state = {published}, ISBN = {978-1-4503-3009-1}, DOI = {10.1145/2641568}, author = {Piryankova IV{ivelina}{Department Human Perception, Cognition and Action}; Stefanucci JK{jstefanucci}{Department Human Perception, Cognition and Action}; Romero J{jromero}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}; Black MJ{black}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Inproceedings{ VolkovaM2014, title = {On-line Annotation System and New Corpora for Fine Grained Sentiment Analysis of Text}, year = {2014}, month = {5}, day = {27}, pages = {74-81}, abstract = {We present a new on-line annotation system that allows participants to perform manual sentiment analysis of coherent texts for emotions, as well as indicate the intensity of the emotion and the emphasis in each phrase. We have developed the following set of emotion categories: amusement, anger, contempt, despair, disgust, excitement, fear, hope, joy, neutral, pride, relief, sadness, shame, surprise. This set greatly expands the boundaries of the often used basic emotion categories and is balanced for positive and negative emotions. Using this new annotation tool and its predecessor version, we have collected two corpora of fairy tale texts manually annotated for emotions on the utterance level. One corpus encompasses 72 texts in German, each annotated by two participants. The other corpus is a work in progress and contains three fairytale texts, each annotated by seven participants. The inter-annotator agreement in both corpora is “fair”. 
Although annotation conflict resolution strategies can be developed for merging several annotations into one, we suggest that for manual SA, researchers should aim at recruiting more annotators and use the consensus method for retrieving an annotation based on the opinion of the majority.}, web_url = {http://www.lrec-conf.org/proceedings/lrec2014/index.html}, event_name = {5th International Workshop on Emotion, Social Signals, Sentiment & Linked Open Data (ES³LOD 2014), Satellite of LREC 2014 ELRA}, event_place = {Reykjavik, Iceland}, state = {published}, author = {Volkova E{evolk}; Mohler BJ{mohler}} } @Inproceedings{ D039CruzPLCBSGHVAFBKKKPFSBKKMLSGTOC2014, title = {Demonstration: VR-HYPERSPACE - The innovative use of virtual reality to increase comfort by changing the perception of self and space}, year = {2014}, month = {2}, pages = {167-168}, abstract = {Our vision is that regardless of future variations in the interior of airplane cabins, we can utilize ever-advancing state-of-the-art virtual and mixed reality technologies with the latest research in neuroscience and psychology to achieve high levels of comfort for passengers. Current surveys on passengers' experience during air travel reveal that they are least satisfied with the amount and effectiveness of their personal space, and their ability to work, sleep or rest. Moreover, considering current trends, the amount of available space is likely to decrease and therefore the passenger's physical comfort during a flight is likely to worsen significantly. Therefore, the main challenge is to enable the passengers to maintain a high level of comfort and satisfaction while being placed in a restricted physical space.}, web_url = {http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6802104}, editor = {Coquillart, S. , K. Kiyokawa, J.E. Swan II, D. Bowman}, publisher = {IEEE}, address = {Piscataway, NJ, USA}, event_name = {IEEE Virtual Reality (VR 2014)}, event_place = {Minneapolis, MN, USA}, state = {published}, ISBN = {978-1-4799-2871-2}, DOI = {10.1109/VR.2014.6802104}, author = {D'Cruz M; Patel H; Lewis L; Cobb S; Bues M; Stefani O; Grobler T; Helin K; Viitaniemi J; Aromaa S; Frohlich B; Beck S; Kunert A; Kulik A; Karaseitanidis I; Psonis P; Frangakis N; Slater M; Bergstrom I; Kilteni K; Kokkinara E; Mohler B{mohler}{Department Human Perception, Cognition and Action}; Leyrer M{leyrer}{Department Human Perception, Cognition and Action}; Soyka F{fsoyka}{Department Human Perception, Cognition and Action}; Gaia E; Tedone D; Olbert M; Cappitelli M} } @Inproceedings{ MullingBMSP2013, title = {Inverse Reinforcement Learning for Strategy Extraction}, year = {2013}, month = {9}, day = {27}, pages = {1-9}, abstract = {In competitive motor tasks such as table tennis, mastering the task is not merely a matter of perfect execution of a specific movement pattern. Here, a higher-level strategy is required in order to win the game. The data-driven identification of basic strategies in interactive tasks, such as table tennis, is a largely unexplored problem. In order to automatically extract expert knowledge on effective strategic elements from table tennis data, we model the game as a Markov decision problem, where the reward function models the goal of the task as well as all strategic information. We collect data from players with different playing skills and styles using a motion capture system and infer the reward function using inverse reinforcement learning.
We show that the resulting reward functions are able to distinguish the expert among players with different skill levels as well as different playing styles.}, file_url = {fileadmin/user_upload/files/publications/2013/MLSA-2013-Muelling.pdf}, web_url = {https://dtai.cs.kuleuven.be/events/MLSA13/schedule.php}, event_name = {ECML PKDD 2013 Workshop on Machine Learning and Data Mining for Sports Analytics (MLSA 2013)}, event_place = {Praha, Czech Republic}, state = {published}, author = {M\"ulling K{muelling}{Department Empirical Inference}; Boularias A{boularias}{Department Empirical Inference}; Mohler B{mohler}{Department Human Perception, Cognition and Action}; Sch\"olkopf B{bs}{Department Empirical Inference}; Peters J{jrpeters}{Department Empirical Inference}} } @Inproceedings{ KohlerHMSH2012, title = {Recording and Playback of Camera Shake: Benchmarking Blind Deconvolution with a Real-World Database}, year = {2012}, month = {10}, pages = {27-40}, abstract = {Motion blur due to camera shake is one of the predominant sources of degradation in handheld photography. Single image blind deconvolution (BD) or motion deblurring aims at restoring a sharp latent image from the blurred recorded picture without knowing the camera motion that took place during the exposure. BD is a long-standing problem, but has attracted much attention recently, culminating in several algorithms able to restore photos degraded by real camera motion in high quality. In this paper, we present a benchmark dataset for motion deblurring that allows quantitative performance evaluation and comparison of recent approaches featuring non-uniform blur models. To this end, we record and analyse real camera motion, which is played back on a robot platform such that we can record a sequence of sharp images sampling the six-dimensional camera motion trajectory. The goal of deblurring is to recover one of these sharp images, and our dataset contains all information to assess how closely various algorithms approximate that goal. In a comprehensive comparison, we evaluate state-of-the-art single image BD algorithms incorporating uniform and non-uniform blur models.}, web_url = {http://link.springer.com/content/pdf/10.1007%2F978-3-642-33786-4_3.pdf}, editor = {Fitzgibbon, A. , S. Lazebnik, P. Perona, Y. Sato, C. Schmid}, publisher = {Springer}, address = {Berlin, Germany}, series = {Lecture Notes in Computer Science ; 7578}, booktitle = {Computer Vision – ECCV 2012}, event_name = {12th European Conference on Computer Vision}, event_place = {Firenze, Italy}, state = {published}, ISBN = {978-3-642-33785-7}, DOI = {10.1007/978-3-642-33786-4_3}, author = {K\"ohler R{rolfk}; Hirsch M{mhirsch}{Department Empirical Inference}; Mohler B{mohler}{Department Human Perception, Cognition and Action}; Sch\"olkopf B{bs}{Department Empirical Inference}; Harmeling S{harmeling}{Department Empirical Inference}} } @Inproceedings{ AlexandrovaRBTKBM2012, title = {Enhancing Medical Communication Training Using Motion Capture, Perspective Taking and Virtual Reality}, year = {2012}, month = {2}, pages = {16-22}, abstract = {The aim of this work is to increase the effectiveness of real-world medical training simulations by helping trainees gain a better understanding of the importance of communication and teamwork. Therefore we develop an online application which can be used together with real-world simulations to improve training.
To produce the online application we reconstructed two real-world scenarios (one with students and one with practitioners) in an immersive virtual environment. Our application enables the trainees to view the scenario from different perspectives or to freely explore the environment. We aim to integrate it into the medical student curriculum at the University of Tübingen.}, file_url = {fileadmin/user_upload/files/publications/2012/MMVR-2012-Alexandrova.pdf}, web_url = {http://www.nextmed.com/index.html}, editor = {Westwood, J.D. , S.W. Westwood, L. Felländer-Tsai, R.S. Haluck, R.A. Robb, S. Senger, K.G. Vosburgh}, publisher = {IOS Press}, address = {Amsterdam, Netherlands}, booktitle = {Medicine Meets Virtual Reality 19: NextMed}, event_name = {19th Medicine Meets Virtual Reality Conference (MMVR 2012)}, event_place = {Newport Beach, CA, USA}, state = {published}, ISBN = {978-1-61499-021-5}, DOI = {10.3233/978-1-61499-022-2-16}, author = {Alexandrova IV{ivelina}{Department Human Perception, Cognition and Action}; Rall M; Breidt M{mbreidt}{Department Human Perception, Cognition and Action}; Tullius G; Kloos C; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Inproceedings{ SteinickeWLM2011, title = {Perceptually inspired methods for naturally navigating virtual worlds}, year = {2011}, month = {12}, pages = {18: 1-3}, abstract = {In recent years many advances have enabled users to naturally navigate large-scale graphical worlds. The entertainment industry is increasingly providing visual and body-based cues to users to increase the natural feel of their navigational experience. So far, however, none of the existing solutions fully support the most natural locomotion through virtual worlds. Techniques and technologies that take advantage of insights into human perceptual sensitivity thus have to be considered. In this context, by far the most natural way to move through the real world is via a full body experience where we receive sensory stimulation to all of our senses, i.e. when walking, running, biking or driving. With some exciting technological advances, people are now beginning to get this same full body sensory experience when navigating computer-generated, three-dimensional environments. Enabling an active and dynamic ability to navigate large-scale virtual scenes is of great interest for many 3D applications demanding locomotion, such as video games, edutainment, simulation, rehabilitation, military, tourism or architecture. Today it is still mostly impossible to freely move through computer-generated environments in exactly the same way as in the real world. Unnatural and artificial approaches are instead applied, providing only the visual sensation of self-motion. Computer graphics environments were initially restricted to visual displays combined with interaction devices - for example the joystick or mouse - providing often unnatural inputs to generate self-motion. Today, more and more interaction devices like Nintendo Wii, Microsoft Kinect or Sony EyeToy enable intuitive and natural interaction.
In this context many research groups are investigating natural, multimodal methods of generating self-motion in virtual worlds based on such consumer hardware.}, file_url = {fileadmin/user_upload/files/publications/2011/ACM-Siggraph-Asia-2011-Steinick.pdf}, web_url = {http://www.siggraph.org/asia2011/}, publisher = {ACM Press}, address = {New York, NY, USA}, booktitle = {SIGGRAPH Asia 2011 Courses (SA '11)}, event_name = {4th ACM SIGGRAPH Conference and Exhibition on Computer Graphics and Interactive Techniques in Asia ( (SIGGRAPH Asia 2011)}, event_place = {Hong Kong, China}, state = {published}, ISBN = {978-1-4503-1135-9}, DOI = {10.1145/2077434.2077449}, author = {Steinicke F; Whitton MC; L{\'e}cuyer A; Mohler B{mohler}{Department Human Perception, Cognition and Action}} } @Inproceedings{ McManusBSdBM2011, title = {The influence of avatar (self and character) animations on distance estimation, object interaction and locomotion in immersive virtual environments}, year = {2011}, month = {8}, pages = {37-44}, abstract = {Humans have been shown to perceive and perform actions differently in immersive virtual environments (VEs) as compared to the real world. Immersive VEs often lack the presence of virtual characters; users are rarely presented with a representation of their own body and have little to no experience with other human avatars/characters. However, virtual characters and avatars are more often being used in immersive VEs. In a two-phase experiment, we investigated the impact of seeing an animated character or a self-avatar in a head-mounted display VE on task performance. In particular, we examined performance on three different behavioral tasks in the VE. In a learning phase, participants either saw a character animation or an animation of a cone. In the task performance phase, we varied whether participants saw a co-located animated self-avatar. Participants performed a distance estimation, an object interaction and a stepping stone locomotion task within the VE. We find no impact of a character animation or a self-avatar on distance estimates. We find that both the animation and the self-avatar influenced task performance which involved interaction with elements in the environment; the object interaction and the stepping stone tasks. Overall the participants performed the tasks faster and more accurately when they either had a self-avatar or saw a character animation. The results suggest that including character animations or self-avatars before or during task execution is beneficial to performance on some common interaction tasks within the VE. 
Finally, we see that in all cases (even without seeing a character or self-avatar animation) participants learned to perform the tasks more quickly and/or more accurately over time.}, web_url = {http://www.apgv.org/archive/apgv11/}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {8th Symposium on Applied Perception in Graphics and Visualization (APGV 2011)}, event_place = {Toulouse, France}, state = {published}, ISBN = {978-1-4503-0889-2}, DOI = {10.1145/2077451.2077458}, author = {McManus EA; Bodenheimer B; Streuber S{stst}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Inproceedings{ LeyrerLBKM2011, title = {The influence of eye height and avatars on egocentric distance estimates in immersive virtual environments}, year = {2011}, month = {8}, pages = {67-74}, abstract = {It is well known that eye height is an important visual cue in the perception of apparent sizes and affordances in virtual environments. However, the influence of visual eye height on egocentric distances in virtual environments has received less attention. To explore this influence, we conducted an experiment where we manipulated the virtual eye height of the user in a head-mounted display virtual environment. As a measurement we asked the participants to verbally judge egocentric distances and to give verbal estimates of the dimensions of the virtual room. In addition, we provided the participants with a self-animated avatar to investigate if this virtual self-representation has an impact on the accuracy of verbal distance judgments, as recently evidenced for distance judgments accessed with an action-based measure. When controlled for ownership, the avatar had a significant influence on the verbal estimates of egocentric distances as found in previous research. Interestingly, we found that the manipulation of eye height has a significant influence on the verbal estimates of both egocentric distances and the dimensions of the room. We discuss the implications of these research results for those interested in space perception in both immersive virtual environments and the real world.}, web_url = {http://www.apgv.org/archive/apgv11/}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {8th Symposium on Applied Perception in Graphics and Visualization (APGV 2011)}, event_place = {Toulouse, France}, state = {published}, ISBN = {978-1-4503-0889-2}, DOI = {10.1145/2077451.2077464}, author = {Leyrer M{leyrer}{Department Human Perception, Cognition and Action}; Linkenauger SA{sally}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Kloos U; Mohler B{mohler}{Department Human Perception, Cognition and Action}} } @Inproceedings{ AlexandrovaRBKTBM2011, title = {Animations of Medical Training Scenarios in Immersive Virtual Environments}, year = {2011}, month = {5}, pages = {9-12}, abstract = {Medical training centers often provide various simulations for students and professionals. Their goal is not only to make trainees practice specific scenarios but also to help them effectively transfer the acquired skills to the real world.
Bearing in mind that virtual environments have already been acknowledged for their potential to improve the medical training process, we propose an approach for rapid generation of animated medical scenarios, which can be used as an additional training tool that fits into the time frame of a semester training program.}, file_url = {fileadmin/user_upload/files/publications/2011/CASA-2011-Alexandrova.pdf}, web_url = {http://www.cad.zju.edu.cn/casa2011/program.html}, editor = {Liu, Y. , A. El Rhalibi, L. Li, M. Zhang, Z. Pan}, publisher = {IEEE}, address = {Piscataway, NJ, USA}, event_name = {2011 Workshop on Digital Media and Digital Content Management (DMDCM)}, event_place = {Hangzhou, Zhejiang, China}, state = {published}, ISBN = {978-1-4577-0271-6}, DOI = {10.1109/DMDCM.2011.64}, author = {Alexandrova IV{ivelina}{Department Human Perception, Cognition and Action}; Rall M; Breidt M{mbreidt}{Department Human Perception, Cognition and Action}; Kloos U; Tullius G; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Inproceedings{ DoddsMdSB2011, title = {Embodied Interaction in Immersive Virtual Environments with Real Time Self-animated Avatars}, year = {2011}, month = {5}, pages = {132-135}, abstract = {This paper outlines our recent research that is providing users with a 3D avatar representation, and in particular focuses on studies in which the avatar is self-animated in real time. We use full body motion tracking, so when participants move their hands and feet, these movements are mapped onto the avatar. In a recent study (Dodds et al., CASA 2010), we found that a self-animated avatar aided participants in a communication task in a head-mounted display immersive virtual environment (VE). From the perspective of communication, we discovered it was important not only for the person speaking to be self-animated, but also for the person listening. Further, we show the potential of immersive VEs for investigating embodied interaction, and highlight possibilities for future research.}, file_url = {fileadmin/user_upload/files/publications/2011/CHI-2011-Dodds.pdf}, web_url = {http://www.antle.iat.sfu.ca/chi2011_EmbodiedWorkshop/}, web_url2 = {http://www.elisevandenhoven.com/publications/antle-chi11wp.pdf}, editor = {Antle, A.N. , P. Marshall, E. Van Den Hoven}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {Workshop Embodied Interaction: Theory and Practice in HCI (CHI 2011)}, event_place = {Vancouver, BC}, state = {published}, author = {Dodds TJ{dodds}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}; Streuber S{stst}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ wallravenSMVP2011, title = {The POETICON enacted scenario corpus: A tool for human and computational experiments on action understanding}, year = {2011}, month = {3}, pages = {484-491}, abstract = {A good data corpus lies at the heart of progress in both perceptual/cognitive science and in computer vision. While there are a few datasets that deal with simple actions, creating a realistic corpus for complex, long action sequences that also contains human-human interactions has so far not been attempted to our knowledge.
Here, we introduce such a corpus for (inter)action understanding that contains six everyday scenarios taking place in a kitchen/living-room setting. Each scenario was acted out several times by different pairs of actors and contains simple object interactions as well as spoken dialogue. In addition, each scenario was first recorded with several HD cameras as well as with motion capture of the actors and several key objects. Having access to the motion capture data allows not only for kinematic analyses, but also for the production of realistic animations where all aspects of the scenario can be fully controlled. We also present results from a first series of perceptual experiments that show how humans are able to infer scenario classes, as well as individual actions and objects from computer animations of everyday situations. These results can serve as a benchmark for future computational approaches that begin to take on complex action understanding.}, web_url = {http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771446}, publisher = {IEEE}, address = {Piscataway, NJ, USA}, event_name = {Ninth IEEE International Conference on Automatic Face & Gesture Recognition and Workshops (FG 2011)}, event_place = {Santa Barbara, CA, USA}, state = {published}, ISBN = {978-1-4244-9140-7}, DOI = {10.1109/FG.2011.5771446}, author = {Wallraven C{walli}{Department Human Perception, Cognition and Action}; Schultze M{mschultze}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}; Vatakis A; Pastra K} } @Inproceedings{ NethSEKBM2011, title = {Velocity-Dependent Dynamic Curvature Gain for Redirected Walking}, year = {2011}, month = {3}, pages = {151-158}, abstract = {The aim of Redirected Walking (RDW) is to redirect a person along their path of travel in a Virtual Environment (VE) in order to increase the virtual space that can be explored in a given tracked area. Among other techniques, the user is redirected on a curved real-world path while visually walking straight in the VE (curvature gain). In this paper, we describe two experiments we conducted to test and extend RDW techniques. In Experiment 1, we measured the effect of walking speed on the detection threshold for curvature of the walking path. In a head-mounted display (HMD) VE, we found a decreased sensitivity for curvature for the slowest walking speed. When participants walked at 0.75 m/s, their detection threshold was approximately 0.1 m-1 (radius of approximately 10 m). In contrast, for faster walking speeds (>1.0 m/s), we found a significantly lower detection threshold of approximately 0.036 m-1 (radius of approximately 27 m). In Experiment 2, we implemented many well-known redirection techniques into one dynamic RDW application. We integrated a large virtual city model and investigated RDW for free exploration. Further, we implemented a dynamic RDW controller which made use of the results from Experiment 1 by dynamically adjusting the applied curvature gain depending on the actual walking velocity of the user. In addition, we investigated the possible role of avatars to slow the users down or make them rotate their heads while exploring. Both the dynamic curvature gain controller and the avatar controller were evaluated in Experiment 2. We measured the average distance that was walked before reaching the boundaries of the tracked area. The mean walked distance was significantly larger in the condition where the dynamic gain controller was applied.
This distance increased from approximately 15 m for static gains to approximately 22 m for dynamic gains. This did not come at the cost of an increase in simulator sickness. Applying the avatar controller did not reveal an effect on walking distance or simulator sickness.}, file_url = {fileadmin/user_upload/files/publications/2011/VR-2011-Neth.pdf}, web_url = {http://conferences.computer.org/vr/2011/}, editor = {Hirose, M. , B. Lok, A. Majumder, D. Schmalstieg}, publisher = {IEEE}, address = {Piscataway, NJ, USA}, event_name = {IEEE Virtual Reality Conference (VR 2011)}, event_place = {Singapore}, state = {published}, ISBN = {978-1-4577-0039-2}, DOI = {10.1109/VR.2011.5759454}, author = {Neth C{neth}{Department Human Perception, Cognition and Action}; Souman JL{souman}{Department Human Perception, Cognition and Action}; Engel D{engel}{Department Human Perception, Cognition and Action}; Kloos U; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 6682, title = {Virtual Storyteller in Immersive Virtual Environments Using Fairy Tales Annotated for Emotion States}, year = {2010}, month = {10}, pages = {65-68}, abstract = {This paper describes the implementation of an automatically generated virtual storyteller from fairy tale texts which were previously annotated for emotion. In order to gain insight into the effectiveness of our virtual storyteller we recorded face, body and voice of an amateur actor and created an actor animation video of one of the fairy tales. We also obtained the actor's annotation of the fairy tale text and used this to create a virtual storyteller video. With these two videos, the virtual storyteller and the actor animation, we conducted a user study to determine the effectiveness of our virtual storyteller at conveying the intended emotions of the actor. Encouragingly, participants performed best (when compared to the intended emotions of the actor) when they marked the emotions of the virtual storyteller. Interestingly, the actor himself was not able to annotate the animated actor video with high accuracy as compared to his annotated text. This argues that for future work we must have our actors also annotate their body and facial expressions, not just the text, in order to further investigate the effectiveness of our virtual storyteller. This research is a first step towards using our virtual storyteller in real-time immersive virtual environments.}, file_url = {/fileadmin/user_upload/files/publications/Alexandrova_JVRC_authors_version_6682[0].pdf}, web_url = {http://www.interaction-design.org/references/conferences/proceedings_of_the_joint_virtual_reality_conference_of_egve_-_eurovr_-_vec.html}, editor = {Kuhlen, T. , S. Coquillart, V.
Interrante}, publisher = {Eurographics Association}, address = {Goslar, Germany}, booktitle = {Virtual Environments 2010}, event_name = {2010 Joint Virtual Reality Conference of EuroVR - EGVE - VEC (JVRC 2010)}, event_place = {Stuttgart, Germany}, state = {published}, ISBN = {978-3-905674-30-9}, DOI = {10.2312/EGVE/JVRC10/065-068}, author = {Alexandrova IV{ivelina}{Department Human Perception, Cognition and Action}; Volkova EP{evolk}{Department Human Perception, Cognition and Action}; Kloos U; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 6623, title = {Egocentric distance judgments in a large screen display immersive virtual environment}, year = {2010}, month = {7}, pages = {57-60}, abstract = {People underestimate egocentric distances in head-mounted display virtual environments, as compared to estimates done in the real world. Our work investigates whether distances are still compressed in a large screen display immersive virtual environment, where participants are able to see their own body surrounded by the virtual environment. We conducted our experiment in both the real world using a real room and the large screen display immersive virtual environment using a 3D model of the real room. Our results showed a significant underestimation of verbal reports of egocentric distances in the large screen display immersive virtual environment, while the distance judgments of the real world were closer to veridical. Moreover, we observed a significant effect of distances in both environments. In the real world, closer distances were slightly underestimated, while further distances were slightly overestimated. In contrast to the real world, in the virtual environment participants overestimated closer distances (up to 2.5 m) and underestimated distances that were further than 3 m. A possible reason for this effect of distances in the virtual environment may be that participants perceived stereo cues differently when the target was projected on the floor versus on the front of the large screen.}, file_url = {/fileadmin/user_upload/files/publications/Alexandrova_APGV_authors_version_6623[0].pdf}, web_url = {http://www.apgv.org/}, editor = {Guttierez, D. , J. Kearney, M. Banks, K. Mania}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {7th Symposium on Applied Perception in Graphics and Visualization (APGV 2010)}, event_place = {Los Angeles, CA, USA}, state = {published}, ISBN = {978-1-4503-0248-7}, DOI = {10.1145/1836248.1836258}, author = {Alexandrova IV{ivelina}{Department Human Perception, Cognition and Action}; Teneva PT{pteneva}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}; Kloos U; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 6541, title = {A Communication Task in HMD Virtual Environments: Speaker and Listener Movement Improves Communication}, year = {2010}, month = {6}, pages = {1-4}, abstract = {In this paper we present an experiment which investigates the influence of animated real-time self-avatars in immersive virtual environments on a communication task. Further, we investigate the influence of 1st and 3rd person perspectives and the influence of tracked speaker and listener.
We find that people perform best in our communication task when both the speaker and the listener have an animated self-avatar and when the speaker is in the 3rd person. The more people move, the better they perform in the communication task. These results suggest that when both people in a virtual environment are animated, they use gestures to communicate.}, file_url = {/fileadmin/user_upload/files/publications/casa_final_6541[0].pdf}, web_url = {http://casa2010.inria.fr/}, event_name = {23rd Annual Conference on Computer Animation and Social Agents (CASA 2010)}, event_place = {Saint-Malo, France}, state = {published}, author = {Dodds TJ{dodds}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 6819, title = {Emotional Perception of Fairy Tales: Achieving Agreement in Emotion Annotation of Text}, year = {2010}, month = {6}, pages = {98-106}, abstract = {Emotion analysis (EA) is a rapidly developing area in computational linguistics. An EA system can be extremely useful in fields such as information retrieval and emotion-driven computer animation. For most EA systems, the number of emotion classes is very limited and the text units the classes are assigned to are discrete and predefined. The question we address in this paper is whether the set of emotion categories can be enriched and whether the units to which the categories are assigned can be more flexibly defined. We present an experiment showing how an annotation task can be set up so that untrained participants can perform emotion analysis with high agreement even when not restricted to a predetermined annotation unit and using a rich set of emotion categories. As such, it sets the stage for the development of more complex EA systems which are closer to the actual human emotional perception of text.}, file_url = {/fileadmin/user_upload/files/publications/NAACL-HLT-2010-Volkova_6819[0].pdf}, web_url = {http://dl.acm.org/citation.cfm?id=1860643}, editor = {Inkpen, D. , C. Strapparava}, publisher = {Association for Computational Linguistics}, address = {Stroudsburg, PA, USA}, event_name = {NAACL HLT 2010 Workshop on Computational Approaches to Analysis and Generation of Emotion in Text (CAAGET '10)}, event_place = {Los Angeles, CA, USA}, state = {published}, author = {Volkova EP{evolk}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Meurers D; Gerdemann D; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 5709, title = {Spatial Memory for Highly Familiar Environments}, year = {2009}, month = {8}, pages = {2650-2655}, abstract = {In this experiment we examined orientation dependency in human memory for a highly familiar environmental space. Twenty-seven inhabitants living for at least two years in Tübingen saw a photorealistic virtual model of the city center (Virtual Tübingen) through a head-mounted display. They were teleported to five different initial locations in Virtual Tübingen and asked to point towards well-known target locations. This procedure was repeated in twelve different body-orientations for each of the initial locations. Participants pointed more accurately when oriented northwards regardless of the initial location. We also found a small effect of local orientation.
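(An aside on the annotation-agreement methodology of Volkova et al. (CAAGET '10) above: the paper works with freely chosen annotation units and a rich category set, so its agreement analysis is more involved, but a minimal, hypothetical illustration of chance-corrected agreement between two annotators over fixed text units is plain Cohen's kappa. The emotion labels in the example are invented.)

from collections import Counter

def cohens_kappa(labels_a, labels_b):
    # Chance-corrected agreement between two annotators over the same units.
    # Undefined (division by zero) if expected agreement is exactly 1.
    n = len(labels_a)
    observed = sum(a == b for a, b in zip(labels_a, labels_b)) / n
    freq_a, freq_b = Counter(labels_a), Counter(labels_b)
    expected = sum(freq_a[c] * freq_b[c] for c in freq_a) / (n * n)
    return (observed - expected) / (1 - expected)

# Example with hypothetical emotion labels (yields kappa = 0.667):
# cohens_kappa(["joy", "fear", "joy", "anger"], ["joy", "fear", "neutral", "anger"])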
The more participants were aligned with the street leading to the target location, the better their pointing performance was. Even though the strong alignment effect with a global orientation is predicted by reference direction theory, this theory does not predict that this global orientation is, first, common for almost all participants, and second, that this orientation is north. We discuss our results with respect to well-known theories of spatial memory and speculate that the bias we find for north orientation is due to participants relying on memory of a city map of Tübingen for their pointing response.}, file_url = {/fileadmin/user_upload/files/publications/CogSci2009-Frankenstein_5709[0].pdf}, web_url = {http://csjarchive.cogsci.rpi.edu/proceedings/2009/index.html}, editor = {Taatgen, N. , H. van Rijn, L. Schomaker, J. Nerbonne}, publisher = {Curran}, address = {Red Hook, NY, USA}, event_name = {31st Annual Conference of the Cognitive Science Society (CogSci 2009)}, event_place = {Amsterdam, Netherlands}, state = {published}, ISBN = {978-1-61567-407-7}, author = {Frankenstein J{frankenstein}{Department Human Perception, Cognition and Action}; Meilinger T{meilinger}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 5818, title = {Does Brief Exposure to a Self-avatar Affect Common Human Behaviors in Immersive Virtual Environments?}, year = {2009}, month = {4}, pages = {33-36}, abstract = {A plausible assumption is that self-avatars increase the realism of immersive virtual environments (VEs), because self-avatars provide the user with a visual representation of his/her own body. Consequently, having a self-avatar might lead to more realistic human behavior in VEs. To test this hypothesis we compared human behavior in VE with and without providing knowledge about a self-avatar with real human behavior in real-space. This comparison was made for three tasks: a locomotion task (moving through the content of the VE), an object interaction task (interacting with the content of the VE), and a social interaction task (interacting with other social entities within the VE). Surprisingly, we did not find effects of self-avatar exposure on any of these tasks. However, participants’ VE and real-world behavior differed significantly.
These results challenge the claim that knowledge about the self-avatar substantially influences natural human behavior in immersive VEs.}, file_url = {/fileadmin/user_upload/files/publications/EG2009_5818[0].pdf}, web_url = {http://diglib.eg.org/handle/10.2312/egs.20091042.033-036}, publisher = {European Association for Computer Graphics}, address = {Geneve, Switzerland}, booktitle = {Eurographics 2009}, event_name = {30th Annual Conference of the European Association for Computer Graphics}, event_place = {München, Germany}, state = {published}, DOI = {10.2312/egs.20091042}, author = {Streuber S{stst}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}; Trutoiu LC{auract}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 5323, title = {A psychophysically calibrated controller for navigating through large environments in a limited free-walking space}, year = {2008}, month = {10}, pages = {157-164}, abstract = {Experience indicates that the sense of presence in a virtual environment is enhanced when the participants are able to actively move through it. When exploring a virtual world by walking, the size of the model is usually limited by the size of the available tracking space. A promising way to overcome these limitations is to use motion compression techniques, which decouple the position in the real and virtual world by introducing imperceptible visual-proprioceptive conflicts. Such techniques usually precalculate the redirection factors, greatly reducing their robustness. We propose a novel way to determine the instantaneous rotational gains using a controller based on an optimization problem. We present a psychophysical study that measures the sensitivity of visual-proprioceptive conflicts during walking and use this to calibrate a real-time controller. We show the validity of our approach by allowing users to walk through virtual environments vastly larger than the tracking space.}, file_url = {/fileadmin/user_upload/files/publications/VRST2008-Engel_5323[0].pdf}, web_url = {http://vrst2008.labri.fr/index.php}, editor = {Feiner, S. , D. Thalmann, P. Guitton, B. Fröhlich, E. Kruijff, M. Hachet}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {15th ACM Symposium on Virtual Reality Software and Technology (VRST 2008)}, event_place = {Bordeaux, France}, state = {published}, ISBN = {978-1-59593-951-7}, DOI = {10.1145/1450579.1450612}, author = {Engel D{engel}{Department Human Perception, Cognition and Action}; Curio C{curio}{Department Human Perception, Cognition and Action}; Tcheang L{ltcheang}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 5414, title = {Learning Perceptual Coupling for Motor Primitives}, year = {2008}, month = {9}, pages = {834-839}, abstract = {Dynamic system-based motor primitives have enabled robots to learn complex tasks ranging from tennis swings to locomotion. However, to date there have been only a few extensions that have incorporated perceptual coupling to variables of external focus, and, furthermore, these modifications have relied upon handcrafted solutions. Humans learn how to couple their movement primitives with external variables. Clearly, such a solution is needed in robotics.
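(A minimal sketch of the kind of redirection controller described in Engel et al. (VRST 2008) above: at every frame, choose a rotational gain that steers the user toward the centre of the tracked space while staying inside psychophysically measured detection thresholds. This is not the paper's optimization-based implementation; the gain bounds and step size are illustrative placeholders, and velocity-dependent thresholds such as those of Neth et al. (VR 2011) above could be substituted.)

import math

GAIN_MIN, GAIN_MAX = 0.8, 1.2   # hypothetical imperceptibility bounds on rotation gain

def rotation_gain(user_pos, user_heading, head_yaw_rate):
    # Direction from the user to the centre of the tracked area (the origin).
    to_center = math.atan2(-user_pos[1], -user_pos[0])
    # Signed heading error, wrapped to [-pi, pi].
    error = (to_center - user_heading + math.pi) % (2 * math.pi) - math.pi
    if abs(head_yaw_rate) < 1e-6:
        return 1.0  # no head rotation this frame, nothing to rescale
    # Amplify rotations that reduce the heading error, attenuate the others
    # (sign conventions are schematic), clamped to the detection thresholds.
    desired = 1.0 + 0.2 * math.copysign(1.0, error * head_yaw_rate)
    return max(GAIN_MIN, min(GAIN_MAX, desired))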
In this paper, we propose an augmented version of the dynamic systems motor primitives which incorporates perceptual coupling to an external variable. The resulting perceptually driven motor primitives include the previous primitives as a special case and can inherit some of their interesting properties. We show that these motor primitives can perform complex tasks such as a Ball-in-a-Cup or Kendama task even with large variances in the initial conditions where a skilled human player would be challenged. To do so, we initialize the motor primitives in the traditional way by imitation learning without perceptual coupling. Subsequently, we improve the motor primitives using a novel reinforcement learning method which is particularly well-suited for motor primitives.}, file_url = {/fileadmin/user_upload/files/publications/IROS2008-Kober_5414[0].pdf}, web_url = {http://www.iros.org/2008}, publisher = {IEEE Service Center}, address = {Piscataway, NJ, USA}, event_name = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS 2008)}, event_place = {Nice, France}, state = {published}, ISBN = {978-1-4244-2057-5}, DOI = {10.1109/IROS.2008.4650953}, author = {Kober J{kober}{Department Empirical Inference}; Mohler B{mohler}; Peters J{jrpeters}{Department Empirical Inference}} } @Inproceedings{ 5233, title = {A full-body avatar improves egocentric distance judgments in an immersive virtual environment}, year = {2008}, month = {8}, pages = {194-197}, abstract = {A number of investigators have reported that distance judgments in virtual environments (VEs) are systematically smaller than distance judgments made in comparably-sized real environments. Many variables that may contribute to this difference have been investigated but none of them fully explain the distance compression. In this paper we asked whether seeing a fully-articulated visual representation of oneself (avatar) within a virtual environment would lead to more accurate estimations of distance. We found that participants who explored near space without the visual avatar underestimated egocentric distance judgments compared to those who similarly explored near space while viewing a fully-articulated avatar. These results are discussed with respect to the perceptual and cognitive mechanisms that may be involved in the observed effects as well as the benefits of visual feedback in the form of an avatar for VE applications.}, web_url = {http://apgv.local/archive/apgv08/}, editor = {Creem-Regehr, S. H., K. Myszkowski}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {5th Symposium on Applied Perception in Graphics and Visualization (APGV 2008)}, event_place = {Los Angeles, CA, USA}, state = {published}, ISBN = {978-1-59593-981-4}, DOI = {10.1145/1394281.1394323}, author = {Mohler B{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Thompson WB; Creem-Regehr SH} } @Inproceedings{ 5552, title = {Joint and individual walking in an immersive collaborative virtual environment}, year = {2008}, month = {8}, pages = {191}, abstract = {The aim of this experiment was to determine to what extent humans optimize their walking behavior in different conditions while navigating in a virtual maze. In two conditions participants either walked individually or jointly connected - carrying a physical stretcher.
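(A schematic 1-D version of the perceptually coupled motor primitives of Kober, Mohler and Peters above. In the papers the forcing term and the coupling are learned by imitation plus reinforcement learning; here they are fixed toy values, and the spring-damper constants are the usual critically damped choice.)

ALPHA, BETA, ALPHA_S, TAU = 25.0, 6.25, 2.0, 1.0

def dmp_step(x, v, s, goal, ext, forcing=0.0, k_couple=5.0, dt=0.001):
    # Spring-damper pull toward the goal, plus a (here constant) forcing term
    # gated by the phase s, plus a coupling toward the perceived external
    # variable ext (e.g. a ball position in a Ball-in-a-Cup task).
    a = (ALPHA * (BETA * (goal - x) - v) + forcing * s + k_couple * (ext - x)) / TAU
    v += a * dt
    x += (v / TAU) * dt
    s += (-ALPHA_S * s / TAU) * dt  # canonical phase decays toward 0
    return x, v, s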
The results showed that the extra effort due to the task-required cooperation was split evenly within the group, even though the sensory feedback about the physical and social environment was significantly different for the leader (e.g., not able to see the follower) and the follower (e.g., able to see the leader). These results might indicate the emergence of a joint body: a phenomenon in which two individual action-perception loops are tuned towards each other in order to optimize a common goal.}, web_url = {http://apgv.local/archive/apgv08/}, editor = {Creem-Regehr, S. H., K. Myszkowski}, publisher = {ACM}, address = {New York, NY, USA}, event_name = {5th Symposium on Applied Perception in Graphics and Visualization (APGV 2008)}, event_place = {Los Angeles, California}, state = {published}, ISBN = {978-1-59593-981-4}, DOI = {10.1145/1394281.1394320}, author = {Streuber S{stst}{Department Human Perception, Cognition and Action}; Chatziastros A{astros}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 5626, title = {Tricking people into feeling like they are moving when they are not paying attention}, year = {2008}, month = {8}, pages = {190}, abstract = {Vection refers to the illusion of self-motion in stationary observers, usually by means of moving visual stimuli [Fischer and Kornmüller 1930]. Linear vection naturally occurs when seated in a train and observing another train on an adjacent track start moving. The very compelling but brief illusion happens as observers are not paying particular attention to the environment but are rather "defocused" from the scene. We studied the effect of two visual attention tasks on the perception of linear vection. The results show a significant decrease in vection onset time with an attention task.}, web_url = {http://apgv.local/archive/apgv08/}, editor = {Creem-Regehr, S. H., K. Myszkowski}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {5th Symposium on Applied Perception in Graphics and Visualization (APGV 2008)}, event_place = {Los Angeles, CA, USA}, state = {published}, ISBN = {978-1-59593-981-4}, DOI = {10.1145/1394281.1394319}, author = {Trutoiu LC{auract}; Streuber S{stst}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Schulte-Pelkum J{jsp}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 5199, title = {Effect of the size of the field of view on the perceived amplitude of rotations of the visual scene}, year = {2008}, month = {5}, pages = {97-102}, abstract = {Efficient navigation requires a good representation of body position/orientation in the environment and an accurate updating of this representation when the body-environment relationship changes. We tested here whether the visual flow alone - i.e., no landmark - can be used to update this representation when the visual scene is rotated, and whether having a limited horizontal field of view (30 or 60 degrees), as is the case in most virtual reality applications, degrades the performance as compared to a full field of view.
Our results show that (i) the visual flow alone does not allow for accurately estimating the amplitude of rotations of the visual scene, notably giving rise to a systematic underestimation of rotations larger than 30 degrees, and (ii) having more than 30 degrees of horizontal field of view does not really improve the performance. Taken together, these results suggest that a 30 degree field of view is enough to (under)estimate the amplitude of visual rotations when only visual flow information is available, and that landmarks should probably be provided if the amplitude of the rotations has to be accurately perceived.}, web_url = {http://diglib.eg.org/handle/10.2312/EGVE.EGVE08.097-102}, editor = {Van Liere, R. , B.J. Mohler}, publisher = {Eurographics Association}, address = {Aire-la-Ville, Switzerland}, booktitle = {Virtual Environments 2008}, event_name = {14th Eurographics Symposium on Virtual Environments (EGVE 2008)}, event_place = {Eindhoven, Netherlands}, state = {published}, ISBN = {978-3-905674-06-4}, DOI = {10.2312/EGVE/EGVE08/097-102}, author = {Ogier M{mogier}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 5232, title = {Circular, Linear, and Curvilinear Vection in a Large-screen Virtual Environment with Floor Projection}, year = {2008}, month = {3}, pages = {115-120}, abstract = {Vection is defined as the compelling sensation of illusory self-motion elicited by a moving sensory, usually visual, stimulus. This paper presents collected introspective data on the experience of linear, circular, and curvilinear vection. We evaluate the differences between twelve different trajectories and the influence of the floor projection on the illusion of self-motion. All of the simulated self-motions examined are of a constant velocity, except for a brief simulated initial acceleration. First, we find that linear translations to the left and right are perceived as the least convincing, while linear down is perceived as the most convincing of the linear trajectories. Second, we find that the floor projection significantly improves the introspective measures of linear vection experienced in a photorealistic three-dimensional town. Finally, we find that while linear forward vection is not perceived to be very convincing, curvilinear forward vection is reported to be as convincing as circular vection. Considering our experimental results, our suggestion for simulators and VE applications where vection is desirable is to increase the number of curvilinear trajectories (as opposed to linear ones) and, if possible, add floor projection in order to improve the illusory sense of self-motion.}, web_url = {http://conferences.computer.org/vr/2008/prelim/}, editor = {Lin, M. , A. Steed, C.
Cruz-Neira}, publisher = {IEEE}, address = {Piscataway, NJ, USA}, event_name = {IEEE Virtual Reality Conference (VR 2008)}, event_place = {Reno, NV, USA}, state = {published}, ISBN = {978-1-4244-1971-5}, DOI = {10.1109/VR.2008.4480760}, author = {Trutoiu LC{auract}; Mohler B{mohler}{Department Human Perception, Cognition and Action}; Schulte-Pelkum J{jsp}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 4823, title = {Visualization and (Mis)Perceptions in Virtual Reality}, journal = {Tagungsband 10. Workshop Sichtsysteme}, year = {2007}, month = {11}, pages = {10-14}, abstract = {Virtual Reality (VR) technologies are now being widely adopted for use in areas as diverse as surgical and military training, architectural design, driving and flight simulation, psychotherapy, and gaming/entertainment. A large range of visual displays (from desktop monitors and head-mounted displays (HMDs) to large projection systems) are all currently being employed, where each display technology offers unique advantages as well as disadvantages. In addition to technical considerations involved in choosing a VR interface, it is also critical to consider perceptual and psychophysical factors concerned with visual displays. It is now widely recognized that perceptual judgments of particular spatial properties are different in VR than in the real world. In this paper, we will provide a brief overview of what is currently known about the kinds of perceptual errors that can be observed in virtual environments (VEs). Subsequently, we will outline the advantages and disadvantages of particular visual displays by focusing on the perceptual and behavioral constraints that are relevant for each. Overall, the main objective of this paper is to highlight the importance of understanding perceptual issues when evaluating different types of visual simulation in VEs.}, file_url = {/fileadmin/user_upload/files/publications/Visualization_and_Perception_in_VR_workshop_final_[0].pdf}, web_url = {http://www.shaker.de/Online-Gesamtkatalog-Download/2017.08.31-17.16.28-192.124.26.250-rad10F31.tmp/3-8322-6684-4_INH.PDF}, editor = {Möller, R.}, publisher = {Shaker}, address = {Aachen, Germany}, event_name = {10. Workshop Sichtsysteme: Visualisierung in der Simulationstechnik}, event_place = {Bremen, Germany}, state = {published}, ISBN = {978-3-8322-6684-4}, author = {Campos JL{camposjl}{Department Human Perception, Cognition and Action}; Nusseck H-G{nusseck}{Department Human Perception, Cognition and Action}; Wallraven C{walli}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 4544, title = {Gait parameters while walking in a head-mounted display virtual environment and the real world}, year = {2007}, month = {7}, pages = {85-88}, abstract = {Full-body motion tracking data was collected for six subjects during free walking. Each participant was asked to walk to a previously seen target under four experimental conditions: eyes closed within the real world, eyes closed wearing a head-mounted display (HMD), eyes open in the real world, and eyes open wearing a HMD. We report three gait parameters for each of these four conditions: stride length, walking velocity, and head-trunk angle.
These data reveal that gait parameters within a HMD virtual environment (VE) are different from those in the real world. A person wearing a HMD and backpack walks more slowly and takes shorter strides than in a comparable real-world condition. In addition, head-trunk angle while walking to a target on the ground plane is lowest when walking with eyes open in a HMD VE.}, file_url = {/fileadmin/user_upload/files/publications/Final_EGVR2007_4544[0].pdf}, web_url = {http://diglib.eg.org/handle/10.2312/PE.VE2007Short.085-088}, editor = {Fröhlich, B. , R. Blach, R. van Liere}, publisher = {Eurographics Association}, address = {Aire-la-Ville, Switzerland}, event_name = {13th Eurographics Symposium on Virtual Environments and 10th Immersive Projection Technology Workshop (IPT-EGVE 2007)}, event_place = {Weimar, Germany}, state = {published}, ISBN = {978-3-905673-64-7}, DOI = {10.2312/PE/VE2007Short/085-088}, author = {Mohler B{mohler}{Department Human Perception, Cognition and Action}; Campos J{camposjl}{Department Human Perception, Cognition and Action}; Weyel M{weyel}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 4655, title = {Orthographic and perspective projection influences linear vection in large screen virtual environments}, year = {2007}, month = {7}, pages = {145}, abstract = {Vection is defined as the visually induced illusion of self-motion [Fischer and Kornmüller 1930]. Previous research has suggested that linear vection (the illusion of self-translation) is harder to achieve than circular vection (the illusion of self-rotation) in both laboratory settings (typically using 2D stimuli such as black and white stripes) [Rieser 2006] and virtual environment setups [Schulte-Pelkum 2007; Mohler et al. 2005]. In a real-life situation, when experiencing circular vection, all objects rotate around the observer with the same angular velocity. For linear motion, however, the change in the observer's position results in a change in the observed position of closer objects with respect to farther away objects or the background. This phenomenon, motion parallax, provides pictorial depth cues as closer objects appear to be moving faster compared to more distant objects.}, file_url = {/fileadmin/user_upload/files/publications/apgv07-145_[0].pdf}, web_url = {http://www.apgv.org/archive/apgv07/index.html}, editor = {Wallraven, C. , V. Sundstedt}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {4th Symposium on Applied Perception in Graphics and Visualization (APGV 2007)}, event_place = {Tübingen, Germany}, state = {published}, ISBN = {978-1-59593-670-7}, DOI = {10.1145/1272582.1272622}, author = {Trutoiu LC{auract}; Marin S-D; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Fennema C} } @Inproceedings{ 4509, title = {The contributions of visual flow and locomotor cues to walked distance estimation in a virtual environment}, year = {2007}, month = {7}, pages = {146}, abstract = {Traversed distance perception involves estimating the extent of self-motion as one travels from one position in space to another. As such, it is a multi-modal experience in which information from both visual flow and locomotor cues (i.e. proprioceptive, efference copy and vestibular cues) jointly specify the magnitude of self-motion.
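(Two hypothetical helper functions in the spirit of the gait-parameter analysis of Mohler et al. (IPT-EGVE 2007) above: walking velocity from time-stamped ground-plane positions and stride length from successive heel strikes of the same foot. Heel-strike detection and head-trunk angle extraction from the tracker data are omitted.)

import math

def walking_velocity(positions, timestamps):
    # Mean walking speed (m/s): path length divided by elapsed time.
    path = sum(math.dist(p, q) for p, q in zip(positions, positions[1:]))
    return path / (timestamps[-1] - timestamps[0])

def stride_lengths(heel_strikes):
    # Distances (m) between successive heel-strike positions of one foot.
    return [math.dist(p, q) for p, q in zip(heel_strikes, heel_strikes[1:])]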
While recent evidence has demonstrated the extent to which each of these cues can be used independently to estimate traversed distance, relatively little is known about how they are integrated when simultaneously present. Evaluating multi-modal cue integration in the context of dynamic locomotor behaviour is important both for understanding self-motion perception and for understanding perceptual-motor coupling in real and virtual environments.}, file_url = {/fileadmin/user_upload/files/publications/apgv07-146_4509[0].pdf}, web_url = {http://www.apgv.org/archive/apgv07/}, editor = {Wallraven, C. , V. Sundstedt}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {4th Symposium on Applied Perception in Graphics and Visualization (APGV 2007)}, event_place = {Tübingen, Germany}, state = {published}, ISBN = {978-1-59593-670-7}, DOI = {10.1145/1272582.1272623}, author = {Campos JL{camposjl}{Department Human Perception, Cognition and Action}; Butler JS{butler}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ MohlerCT2006, title = {The influence of feedback on egocentric distance judgments in real and virtual environments}, year = {2006}, month = {7}, pages = {9-14}, abstract = {A number of investigators have reported that distance judgments in virtual environments (VEs) are systematically smaller than distance judgments made in comparably-sized real environments. Many variables that may contribute to this difference have been investigated but none of them fully explain the distance compression. One approach to this problem that has implications for both VE applications and the study of perceptual mechanisms is to examine the influence of the feedback available to the user. Most generally, we asked whether feedback within a virtual environment would lead to more accurate estimations of distance. Next, given the prediction that some change in behavior would be observed, we asked whether specific adaptation effects would generalize to other indications of distance. Finally, we asked whether these effects would transfer from the VE to the real world. All distance judgments in the head-mounted display (HMD) became nearly accurate after three different forms of feedback were given within the HMD. However, not all feedback sessions within the HMD altered real world distance judgments. These results are discussed with respect to the perceptual and cognitive mechanisms that may be involved in the observed adaptation effects as well as the benefits of feedback for VE applications.}, web_url = {http://dl.acm.org/citation.cfm?id=1140493}, editor = {Fleming, R.W. , S. Kim}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {3rd Symposium on Applied Perception in Graphics and Visualization (APGV 2006)}, event_place = {Boston, MA, USA}, state = {published}, ISBN = {1-59593-429-4}, DOI = {10.1145/1140491.1140493}, author = {Mohler BJ{mohler}; Creem-Regehr SH; Thompson WB} } @Inproceedings{ 3489, title = {Measuring Vection in a Large Screen Virtual Environment}, year = {2005}, month = {8}, pages = {103-109}, abstract = {This paper describes the use of a large screen virtual environment to induce the perception of translational and rotational self-motion. We explore two aspects of this problem.
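(The standard maximum-likelihood cue-combination model that is commonly tested in this literature, not necessarily the analysis of Campos et al. above: each cue's weight is proportional to its reliability, i.e. inversely proportional to its variance, so the fused estimate is never less reliable than the better single cue.)

def fuse_estimates(mu_visual, var_visual, mu_loco, var_loco):
    # Reliability-weighted fusion of a visual-flow and a locomotor distance
    # estimate; the cues are assumed independent with Gaussian noise.
    w_visual = (1.0 / var_visual) / (1.0 / var_visual + 1.0 / var_loco)
    mu = w_visual * mu_visual + (1.0 - w_visual) * mu_loco
    var = 1.0 / (1.0 / var_visual + 1.0 / var_loco)
    return mu, var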
Our first study investigates how the level of visual immersion (seeing a reference frame) affects subjective measures of vection. For visual patterns consistent with translation, self-reported subjective measures of self-motion were increased when the floor and ceiling were visible outside of the projection area. When the visual patterns indicated rotation, the strength of the subjective experience of circular vection was unaffected by whether or not the floor and ceiling were visible. We also found that circular vection induced by the large screen display was reported subjectively more compelling than translational vection. The second study we present describes a novel way in which to measure the effects of displays intended to produce a sense of vection. It is known that people unintentionally drift forward if asked to run in place while blindfolded and that adaptations involving perceived linear self-motion can change the rate of drift. We showed for the first time that there is a lateral drift following perceived rotational self-motion and we added to the empirical data associated with the drift effect for translational self-motion by exploring the condition in which the only self-motion cues are visual.}, file_url = {/fileadmin/user_upload/files/publications/mohler-etal-apgv-2005_3489[0].pdf}, web_url = {http://portal.acm.org/citation.cfm?id=1080421}, editor = {Bülthoff, H.H., T. Troscianko}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {2nd Symposium on Applied Perception in Graphics and Visualization (APGV 2005)}, event_place = {La Coruña, Spain}, state = {published}, ISBN = {1-59593-139-2}, DOI = {10.1145/1080402.1080421}, author = {Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Riecke BE{bernie}{Department Human Perception, Cognition and Action}; Thompson WB; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inproceedings{ 4548, title = {Investigations on the interactions between vision and locomotion using a treadmill virtual environment}, year = {2005}, month = {1}, pages = {481-492}, abstract = {Treadmill-based virtual environments have the potential to allow near natural locomotion through large-scale simulated spaces. To be effective, such devices need to provide users with visual and biomechanical sensations of walking that are sufficiently accurate to evoke perception-action couplings comparable to those occurring in the real world. We are exploring this problem using a custom built, computer controlled treadmill with a 6' by 10' walking surface, coupled to computer graphics presented on wide field-of-view back projection screens. The system has the added feature of being able to apply forces to the user to simulate walking on slopes and the effects of changes in walking speed. We have demonstrated the effectiveness of this system by showing that the perceptual-motor calibration of human locomotion in the real world can be altered by prior walking on the treadmill virtual environment when the visual flow associated with self-motion is mismatched relative to biomechanical walking speed. The perceptual-motor coupling that we have achieved is sufficient to allow investigation of a number of open questions, including the effect of walking on slopes on the visual estimation of slant and visual influences on gait and walking speed.}, web_url = {http://spiedigitallibrary.org/proceedings/resource/2/psisdg/5666/1/481_1}, editor = {Rogowitz, B.E. , T.N. Pappas, S.J. 
Daly}, publisher = {SPIE}, address = {Bellingham, WA, US}, series = {Proceedings of the SPIE ; 5666}, booktitle = {Human Vision and Electronic Imaging X}, event_name = {Electronic Imaging: Science and Technology}, event_place = {San Jose, CA, USA}, state = {published}, ISBN = {0-8194-5639-X}, DOI = {10.1117/12.610861}, author = {Thompson WB; Mohler BJ{mohler}; Creem-Regehr SH; Willemsen P} } @Inproceedings{ 4549, title = {Visual Motion Influences Locomotion in a Treadmill Virtual Environment}, year = {2004}, month = {8}, pages = {19-22}, abstract = {This paper describes the use of a treadmill-based virtual environment (VE) to investigate the influence of visual motion on locomotion. First, we demonstrate that a computer-controlled treadmill coupled with a wide field of view computer graphics display can be used to study interactions between perception and action. Previous work has demonstrated that humans calibrate their visually-directed actions to changing circumstances in their environment. Using a treadmill VE, we show that calibration of action is reflected in the real world as a result of manipulating the relation between the speed of visual flow, presented using computer graphics, and the speed of walking on a treadmill. Second, we extend the methodology in our treadmill VE to investigate an open question involving human gait transitions and show that the speed of visual motion influences the speed at which the gait transition occurs. These results demonstrate both the effectiveness of treadmill-based VEs in simulating the perceptual-motor effects of walking through the real world and the value of such systems in addressing basic perceptual questions that would otherwise be difficult to explore.}, web_url = {http://dl.acm.org/citation.cfm?id=1012554}, editor = {Interrante, V.L. , A. McNamara, H.H. Bülthoff, H.E. Rushmeier}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {1st Symposium on Applied Perception in Graphics and Visualization (APGV 2004)}, event_place = {Los Angeles, CA, USA}, state = {published}, ISBN = {1-58113-914-4}, DOI = {10.1145/1012551.1012554}, author = {Mohler B{mohler}; Thompson WB; Creem-Regehr SH; Pick HL; Scholes J; Rieser JJ; Willemsen P} } @Inproceedings{ WebsterHMBRBD2003, title = {A Haptic Surgical Simulator for Operative Setup and Exposure in Laparoscopic Cholecystectomy}, year = {2003}, month = {1}, pages = {1-5}, abstract = {This paper describes a laparoscopic cholecystectomy surgical training software system we have developed using the Immersion Laparoscopic Surgical Workstation™ and the Verefi Technologies Inc USB 30 degree Endoscopic Ecamera™ system hardware. The trainer is designed to train and test for many laparoscopic skills such as: manipulation of the laparoscope, grasping and stretching the gallbladder to expose the cystic duct, clip application to the cystic duct, cutting the cystic duct, and removing the gallbladder from the abdomen. Simulated patient breathing is accomplished by using a texture motion algorithm. The gallbladder, cystic duct and bile ducts are stretched and compressed using Hooke's law (F = -kx) within a mass-spring model.
The intent is to provide an effective method to learn the laparoscopic cholecystectomy procedure using a haptic surgical simulator.}, web_url = {https://www.researchgate.net/publication/266522599_A_Haptic_Surgical_Simulator_for_Operative_Setup_and_Exposure_in_Laparoscopic_Cholecystectomy}, event_name = {11th Annual Medicine Meets Virtual Reality Conference (MMVR 2003)}, event_place = {Newport Beach, CA, USA}, state = {published}, author = {Webster R; Haluck RS; Mohler B{mohler}; Boyd J; Reeser J; Benson A; DeSanto D} } @Inproceedings{ 4550, title = {Elastically Deformable 3D Organs for Haptic Surgical Simulators}, year = {2002}, month = {1}, pages = {570-572}, abstract = {This paper describes a technique for incorporating real-time elastically deformable 3D organs in haptic surgical simulators. Our system is a physically based particle model utilizing a mass-spring-damper connectivity with an implicit predictor to speed up calculations during each time step. The solution involves repeated application of Newton's 2nd Law of motion: F = ma using an implicit solver for numerically solving the differential equations.}, web_url = {http://booksonline.iospress.nl/Content/View.aspx?piid=26830}, editor = {Westwood, J.D. , H. Miller Hoffman, R.A. Robb, D. Stredney}, publisher = {IOS Press}, address = {Amsterdam, Netherlands}, event_name = {Medicine Meets Virtual Reality Conference (MMVR 2002)}, event_place = {Newport Beach, CA, USA}, state = {published}, ISBN = {978-1-58603-203-6}, DOI = {10.3233/978-1-60750-929-5-570}, author = {Webster RW; Haluck R; Mohler B{mohler}; Ravenscrogt R; Crouthamel E; Frack T; Terlecki S; Shaeffer J} } @Inproceedings{ 4552, title = {A Virtual Reality Surgical Trainer for Navigation in Laparoscopic Surgery}, year = {2001}, month = {1}, pages = {171-176}, abstract = {A virtual reality trainer was designed to familiarize students and surgeons with surgical navigation using an angled laparoscopic lens and camera system. Previous laparoscopic trainers have been devoted to task or procedure training. Our system is exclusively devoted to laparoscope manipulation and navigation. Laparoscopic experts scored better than novices in this system, suggesting construct validity. The trainer received favorable subjective ratings. This simulator may provide for improved navigation in the operating room and become a useful tool for residents and practicing surgeons.}, web_url = {http://ebooks.iospress.nl/volume/medicine-meets-virtual-reality-2001}, editor = {Westwood, J.D. , H.M. Hoffman, G.T. Mogel, D. Stredney, R.A. Robb}, publisher = {IOS Press}, address = {Amsterdam, Netherlands}, series = {Studies in Health Technology and Informatics ; 81}, booktitle = {Medicine Meets Virtual Reality 2001: Outer Space, Inner Space, Virtual Space}, event_name = {9th Medicine Meets Virtual Reality Conference (MMVR 2001)}, event_place = {Newport Beach, CA, USA}, state = {published}, ISBN = {978-1-58603-143-5}, DOI = {10.3233/978-1-60750-925-7-171}, author = {Haluck R; Webster R; Snyder A; Melkonian M; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Dise M; LeFever A} } @Inproceedings{ 4551, title = {Prototype Haptic Suturing Simulator}, year = {2001}, month = {1}, pages = {567-569}, abstract = {A new haptic simulation designed to teach basic suturing for simple wound closure is described. Needle holders are attached to the haptic device as the graphics of the needle holders, needle, sutures and virtual skin are displayed and updated in real time.
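(A toy 1-D chain illustrating the force model named in the Webster et al. entries above: Hooke's law springs plus damping, integrated via Newton's second law. For brevity this sketch uses semi-implicit Euler, whereas the papers describe an implicit solver for stability at haptic update rates; all constants are illustrative placeholders.)

K, C, M, DT = 200.0, 2.0, 0.05, 0.001  # illustrative stiffness, damping, mass, step
REST = 0.1                             # spring rest length (m)

def tissue_step(xs, vs, pulled_tip=None):
    # Advance a chain of particle positions xs and velocities vs by one step.
    forces = [0.0] * len(xs)
    for i in range(len(xs) - 1):
        stretch = (xs[i + 1] - xs[i]) - REST          # Hooke's law: F = -k * stretch
        f = K * stretch + C * (vs[i + 1] - vs[i])     # spring plus damper
        forces[i] += f
        forces[i + 1] -= f
    for i in range(len(xs)):
        vs[i] += (forces[i] / M) * DT                 # Newton's second law: a = F / m
        xs[i] += vs[i] * DT
    if pulled_tip is not None:                        # boundary condition from the tool
        xs[-1] = pulled_tip
        vs[-1] = 0.0
    return xs, vs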
The simulator incorporates several interesting components such as real-time modeling of deformable skin, tissue and suture material and real-time recording of the state of activity during the task using a finite state model.}, web_url = {http://ebooks.iospress.nl/volume/medicine-meets-virtual-reality-2001}, editor = {Westwood, J.D. , H.M. Hoffman, G.T. Mogel, D. Stredney, R.A. Robb}, publisher = {IOS Press}, address = {Amsterdam, Netherlands}, series = {Studies in Health Technology and Informatics ; 81}, booktitle = {Medicine Meets Virtual Reality 2001: Outer Space, Inner Space, Virtual Space}, event_name = {9th Medicine Meets Virtual Reality Conference (MMVR 2001)}, event_place = {Newport Beach, CA, USA}, state = {published}, ISBN = {978-1-58603-143-5}, DOI = {10.3233/978-1-60750-925-7-567}, author = {Webster R; Zimmerman D; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Melkonian M; Haluck R} } @Inbook{ WelchM2014, title = {Adapting to Virtual Environments}, year = {2014}, pages = {627-646}, web_url = {http://www.crcnetbase.com/doi/abs/10.1201/b17360-31}, editor = {Hale, K.S. , K.M. Stanney}, publisher = {CRC Press}, address = {Boca Raton, FL, USA}, edition = {2.}, booktitle = {Handbook of Virtual Environments: Design, Implementation, and Applications}, state = {published}, ISBN = {978-1-4665-1184-2}, DOI = {10.1201/b17360-31}, author = {Welch RB; Mohler BJ{mohler}} } @Inbook{ MohlerdB2012, title = {Multisensory contributions to spatial perception}, year = {2012}, month = {10}, pages = {81-97}, abstract = {How do we know where environmental objects are located with respect to our body? How are we able to navigate, manipulate, and interact with the environment? In this chapter, we describe how capturing sensory signals from the environment and performing internal computations achieve such goals. The first step, called early or low-level processing, is based on the functioning of feature detectors that respond selectively to elementary patterns of stimulation. Separate organs capture sensory signals and then process them separately in what we normally refer to as senses: smell, taste, touch, audition, and vision. In the first section of this chapter, we present the sense modalities that provide sensory information for the perception of spatial properties such as distance, direction, and extent. Although it is hard to distinguish where early processing ends and high-level perception begins, the rest of the chapter focuses on the intermediate level of processing, which is implicitly assumed to be a key component of several perceptual and computational theories (Gibson, 1979; Marr, 1982) and for the visual modality has been termed mid-level vision (see Nakayama, He, & Shimojo, 1995). In particular, we discuss the ability of the perceptual system to specify the position and orientation of environmental objects relative to other objects and especially relative to the observer’s body. We present computational theories and relevant scientific results on individual sense modalities and on the integration of sensory information within and across the sensory modalities. Finally, in the last section of this chapter, we describe how the information processing approach has enabled a better understanding of the perceptual processes in relation to two specific high-level perceptual functions: self-orientation perception and object recognition.}, web_url = {http://psycnet.apa.org/record/2012-08674-005}, editor = {Waller, D. , L.
Nadel}, publisher = {American Psychological Association}, address = {Washington, DC, USA}, booktitle = {Handbook of Spatial Cognition}, state = {published}, ISBN = {978-1-433-81204-0}, DOI = {10.1037/13936-005}, author = {Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Di Luca M{max}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Inbook{ 6234, title = {Imitation and Reinforcement Learning for Motor Primitives with Perceptual Coupling}, year = {2010}, month = {1}, pages = {209-225}, abstract = {Traditional motor primitive approaches deal largely with open-loop policies which can only cope with small perturbations. In this paper, we present a new type of motor primitive policies which serve as closed-loop policies together with an appropriate learning algorithm. Our new motor primitives are an augmented version of the dynamical system-based motor primitives [Ijspeert et al. 2002] that incorporates perceptual coupling to external variables. We show that these motor primitives can perform complex tasks such as the Ball-in-a-Cup or Kendama task even with large variances in the initial conditions where a skilled human player would be challenged. We initialize the open-loop policies by imitation learning and the perceptual coupling with a handcrafted solution. We first improve the open-loop policies and subsequently the perceptual coupling using a novel reinforcement learning method which is particularly well-suited for dynamical system-based motor primitives.}, file_url = {/fileadmin/user_upload/files/publications/Imitation%20and%20Reinforcement%20Learning%20for%20Motor%20Primitives%20with%20Perceptual%20Coupling_6234[0].pdf}, web_url = {http://springerlink.com/content/j386700827247624/?p=d2eb14c459a648b6857fa8227d3bb196&pi=9}, editor = {Sigaud, O. , J. Peters}, publisher = {Springer}, address = {Berlin, Germany}, series = {Studies in Computational Intelligence ; 264}, booktitle = {From Motor Learning to Interaction Learning in Robots}, state = {published}, ISBN = {978-3-642-05181-4}, DOI = {10.1007/978-3-642-05181-4_10}, author = {Kober J{kober}{Department Empirical Inference}; Mohler B{mohler}; Peters J{jrpeters}{Department Empirical Inference}} } @Poster{ HanriederMM2017, title = {Collaborative Spatial Search - Implementation and Validation of a Multi-User Task in Walkable Virtual Environments}, year = {2017}, month = {11}, web_url = {http://www.kyb.tuebingen.mpg.de/research/rg/mohlergroup/GIVRAR2017.html}, event_name = {14. GI VR/AR-Workshop}, event_place = {Tübingen, Germany}, state = {published}, author = {Hanrieder M{mhanrieder}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}; Meilinger T{meilinger}{Department Human Perception, Cognition and Action}} } @Poster{ SchirmMT2017, title = {Selbstavatare in virtueller Realität: Automatisierte Validierung von Körperform und Animation mit wenigen Markern}, year = {2017}, month = {11}, web_url = {http://www.kyb.tuebingen.mpg.de/research/rg/mohlergroup/GIVRAR2017.html}, event_name = {14.
GI VR/AR-Workshop}, event_place = {Tübingen, Germany}, state = {published}, author = {Schirm J{jschirm}; Mohler B{mohler}; Thaler A{athaler}} } @Poster{ MolbertTMSBKZG2017_2, title = {Assessing body image disturbance in patients with anorexia nervosa using biometric self-avatars in virtual reality: attitudinal components rather than visual body size estimation are distorted}, year = {2017}, month = {9}, day = {15}, abstract = {Aims: Body image disturbance (BID) is a core symptom of Anorexia Nervosa (AN), but as yet its distinctive features are unknown. Here we use individual 3D-avatars in virtual reality to investigate contributions of weight perception and evaluation to BID. Method: We investigated n=24 women with AN and n=24 healthy controls. Based on 3D body scans, we created individual avatars for each participant. Each avatar was biometrically manipulated to gradually represent +/- 20% of the participant’s weight. Avatars were presented on a stereoscopic life-size screen and participants had to identify/adjust their current and desired body weight. Additionally, eating pathology, body dissatisfaction and self-esteem were assessed. Results: Both groups underestimated their weight, with a trend that women with AN underestimated more than controls. Both groups indicated a desired weight lower than their actual weight, and in percent of own body weight, controls even more so. Of note, the average desired body of women with AN was severely underweight, while the controls’ desired body was normal weight. Correlation analyses revealed that desired body size, but not accuracy of body size estimation, was associated with eating disorder symptoms. Conclusions: Our results contradict the widespread assumption that BID is driven by overestimation and emphasize the role of attitudinal components for BID. According to our observations, clinical interventions should target a change in desired weight.}, web_url = {http://edresearchsociety.org/web/program-full.php?dayshow=3&displayday=3}, web_url2 = {http://edresearchsociety.org/EDRS_ONLINE/mobile/show_presentation.php?abstractno=168}, event_name = {XXIIIrd Annual Meeting of the Eating Disorders Research Society (EDRS 2017)}, event_place = {Leipzig, Germany}, state = {published}, author = {M\"olbert SC{smoelbert}; Thaler A{athaler}; Mohler B{mohler}; Streuber S{stst}; Black MJ{black}; Karnath H-O; Zipfel S; Giel KE} } @Poster{ FosterZRBMBB2017, title = {Decoding categories shared by the face and body}, year = {2017}, month = {8}, day = {31}, web_url = {http://www.ecvp.org/2017/programme.html}, event_name = {40th European Conference on Visual Perception (ECVP 2017)}, event_place = {Berlin, Germany}, state = {published}, author = {Foster C{cfoster}{Department Human Perception, Cognition and Action}; Zhao M{mzhao}{Department Human Perception, Cognition and Action}; Romero J{jromero}; Black M{black}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Bartels A{abartels}{Department Physiology of Cognitive Processes}; B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}} } @Poster{ VanderVeerLAWBM2017, title = {Where am I?
Pointing to myself and body parts in virtual reality}, year = {2017}, month = {8}, day = {30}, web_url = {http://www.ecvp.org/2017/programme.html}, event_name = {40th European Conference on Visual Perception (ECVP 2017)}, event_place = {Berlin, Germany}, state = {published}, author = {Van der Veer A{aveer}; Longo M; Alsmith A; Wong HY; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Poster{ ThalerPGSdSRBM2017, title = {Gender differences in visual perception of own body weight}, year = {2017}, month = {8}, day = {29}, web_url = {http://www.ecvp.org/2017/programme.html}, event_name = {40th European Conference on Visual Perception (ECVP 2017)}, event_place = {Berlin, Germany}, state = {published}, author = {Thaler A{athaler}{Department Human Perception, Cognition and Action}; Piryankova I{ivelina}{Department Human Perception, Cognition and Action}; Geuss MN{mgeuss}{Department Human Perception, Cognition and Action}; Stefanucci JK{jstefanucci}{Department Human Perception, Cognition and Action}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}; Streuber S{stst}{Department Human Perception, Cognition and Action}; Romero J{jromero}; Black MJ{black}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Poster{ ThalerGSMGBM2017, title = {Perception of others’ body sizes is predicted by own body size}, journal = {Journal of Vision}, year = {2017}, month = {8}, volume = {17}, number = {10}, pages = {843}, abstract = {Previous research demonstrated that estimates of others’ body sizes are biased towards the average body size in the population (Cornelissen, Gledhill, Cornelissen & Tovée, 2016). Bodies in the environment not only influence the internal reference of what is perceived as average or “normal”, but also play an essential role in self-body size evaluation via social comparison (Cattarin, Thompson, Thomas & Williams, 2000). In two psychophysical experiments, we asked whether there is also an influence of own body size on the perception of others’ body sizes. For Experiment 1, four biometric female avatars with a body mass index (BMI) of 15, 25, 35, and 45 were generated, and then their weight was altered (±5, ±10, ±15, and ±20% BMI change) based on a statistical body model. For each of the avatar series, female participants spanning the BMI range memorized what the avatar’s body looked like and then judged, for each presented body varying in weight, whether it was the same as the one memorized. Results showed no influence of participants’ BMI on the accuracy of body size estimates, but sensitivity to weight changes was highest for bodies close to one’s own BMI. In Experiment 2, we examined whether this effect was driven by memory or perceptual factors. Specifically, in a 2-alternative forced choice discrimination task, two bodies were presented simultaneously using the same BMI categories as in Experiment 1. If participants’ body size influences sensitivity during simultaneous presentation, it would suggest that the effect found in Experiment 1 is not due to a better memorization of bodies that are close to one’s own body size. Again, sensitivity to differences in body weight was highest for bodies close to one’s own BMI.
These results suggest that our own body size influences our perceptual ability to discriminate the sizes of others’ bodies.}, web_url = {http://www.visionsciences.org/programs/VSS_2017_Abstracts.pdf}, event_name = {17th Annual Meeting of the Vision Sciences Society (VSS 2017)}, event_place = {St. Pete Beach, FL, USA}, state = {published}, DOI = {10.1167/17.10.843}, author = {Thaler A{athaler}; Geuss M{mgeuss}; Stefanucci J{jstefanucci}; M\"olbert S{smoelbert}; Giel K; Black M{black}; Mohler B{mohler}} } @Poster{ VanderVeerLAWM2017, title = {Where am I in virtual reality?}, year = {2017}, month = {3}, web_url = {http://www.mind-and-brain.de/people/einstein-visiting-fellows/vittorio-gallese/symposium-2017-me-i/}, event_name = {Symposium Me & I: Conceptual and Empirical Perspectives on the Self}, event_place = {Berlin, Germany}, state = {published}, author = {van der Veer AH{aveer}; Longo MR; Alsmith AJT; Wong HJ; Mohler BJ{mohler}} } @Poster{ VanderVeerLAWM2017_2, title = {Where am I in virtual reality?}, year = {2017}, month = {3}, web_url = {http://www.mindbrainbody.de/}, event_name = {5th Mind, Brain & Body Symposium (MBBS 2017)}, event_place = {Berlin, Germany}, state = {published}, author = {van der Veer AH{aveer}; Longo MR; Alsmith AJT; Wong HJ; Mohler BJ{mohler}} } @Poster{ DobrickiM2016, title = {Body schema boundaries are formed by sensorimotor body-environment distinction}, year = {2016}, month = {11}, day = {15}, number = {555.02}, abstract = {The basic self-perception of being a body that is delimited and in this sense distinct from the environment should be defined by the boundaries of the body schema. The body schema has been extensively investigated in humans, e.g., in studies on neurological disorders, as well as in animals, e.g., in studies on somatosensory receptive fields. Yet, how the boundaries of the body schema are formed has so far not been investigated. We investigated whether these subjective body boundaries result from the distinction of body and environment sensations regarding their predictability by the motor representation of active bodily self-motion. In a first study, we enabled healthy humans to control with their physical body a life-sized virtual body that mirrored their movements in extrapersonal space. In a second study, we asked human subjects to control either such a mirror-avatar or one that was additionally unpredictably shaking. We found that the remote motor control of the mirror-avatar caused the body schema boundaries to decline while inhibiting the distinction of auto-kinesthetic sensations that are, and visual motion sensations that are not, directly predicted by the motor representation of active bodily self-motion. This sensorimotor body-environment distinction was instead intensified by the remote motor control of the shaking-avatar, which fortified the body schema boundaries. Our findings suggest that the distinction of sensations that are, and those that are not, directly predicted by motor representation, based on their motor predictability, forms the human body schema boundaries. This sensorimotor body-environment distinction corresponds to the previously suggested self-world distinction that is regarded as fundamental for motor control to arise from sensorimotor integration and for body self-perception in general.
Such a sensorimotor distinction and the body schema boundaries that it forms may accordingly affect the sense of agency and various other body self-perception components such as the sense of bodily self-identification and self-location.}, web_url = {http://www.abstractsonline.com/pp8/index.html#!/4071/presentation/29005}, event_name = {46th Annual Meeting of the Society for Neuroscience (Neuroscience 2016)}, event_place = {San Diego, CA, USA}, state = {published}, author = {Dobricki M{mdobricki}; Mohler BJ{mohler}} } @Poster{ MeilingerFBMSB2016, title = {Wie erinnern wir räumliches Wissen unseres Wohnortes?}, year = {2016}, month = {9}, day = {19}, web_url = {http://www.dgpskongress.de/frontend/index.php?page_id=453}, event_name = {50. Kongress der Deutschen Gesellschaft für Psychologie (DGPs 2016)}, event_place = {Leipzig, Germany}, state = {published}, author = {Meilinger T{meilinger}{Department Human Perception, Cognition and Action}; Frankenstein J{frankenstein}{Department Human Perception, Cognition and Action}; Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}; Simon N{nadinesimon}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ GeussMTM2016, title = {Body size estimations: the role of visual information from a first-person and mirror perspective}, journal = {Journal of Vision}, year = {2016}, month = {9}, volume = {16}, number = {12}, pages = {986}, abstract = {Our perception of our body, and its size, is important for many aspects of everyday life. Using a variety of measures, previous research demonstrated that people typically overestimate the size of their bodies (Longo & Haggard, 2010). Given that self-body size perception is informed by many different experiences, it is surprising that people do not perceive their bodies veridically. Here, we asked whether different visual experiences of our bodies influence how large we estimate our body’s size. Specifically, participants estimated the width of four different body parts (feet, hips, shoulders, and head) as well as a noncorporeal object, using a visual matching task, in three conditions: No Visual Access, Self-Observation (1st-person visual access), or Mirror (2nd-person visual access). If estimates made when given visual access (through a mirror or from a 1st-person perspective) differ from estimates made with no visual access, it would suggest that this method of viewing one's body has less influence on how we represent the size of our bodies. Consistent with previous research, results demonstrated that in all conditions, each body part was overestimated. Interestingly, in the No Visual Access and Mirror conditions, the degree of overestimation was larger for upper body parts compared to lower body parts, and there were no significant differences between the No Visual Access and Mirror conditions. There was, however, a significant difference between the Self-Observation condition and the other two conditions when estimating one's shoulder width. In the Self-Observation condition, participants were more accurate in estimating shoulder width.
The similarity of results in the No Visual Access and Mirror conditions suggests that our representation of our body size may be partly based on experiences viewing one’s body in reflective surfaces.}, web_url = {jov.arvojournals.org/article.aspx?articleid=2550960}, event_name = {16th Annual Meeting of the Vision Sciences Society (VSS 2016)}, event_place = {St. Pete Beach, FL, USA}, state = {published}, DOI = {10.1167/16.12.986}, author = {Geuss M{mgeuss}{Department Human Perception, Cognition and Action}; M\"olbert SC{smoelbert}; Thaler A{athaler}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Poster{ ThalerGMGBM2016, title = {Does Sensitivity to Weight Changes of Others Depend on Personal Body Size?}, year = {2016}, month = {9}, pages = {28}, abstract = {Previous research has suggested that size estimates of bodies (own and others') are biased towards an average reference body (Cornelissen et al., 2015; Cornelissen et al., 2016). The role of personal body size in body size perception of others is still unclear. In this study, we tested healthy females varying in body mass index (BMI) to investigate whether personal body size influenced accuracy of body size estimation and sensitivity to weight changes of others. We generated four biometric female avatars with BMIs of 15, 25, 35, and 45 and altered the weight of the avatars (5, 10, 15, and 20% BMI change).}, web_url = {https://sites.google.com/site/nenaconference/nena-2016}, event_name = {17th Conference of Junior Neuroscientists of Tübingen (NeNa 2016): Neuroscience & Law}, event_place = {Schramberg, Germany}, state = {published}, author = {Thaler A{athaler}; Geuss MN{mgeuss}; M\"olbert SC{smoelbert}; Giel KE; Black MJ{black}; Mohler BJ{mohler}} } @Poster{ ThalerGMGSBM2016, title = {Investigating the influence of personal BMI on own body size perception in females using self-avatars}, journal = {Journal of Vision}, year = {2016}, month = {9}, volume = {16}, number = {12}, pages = {1400}, abstract = {Previous research has suggested that inaccuracies in own body size estimation can largely be explained by a known error in perceived magnitude, called contraction bias (Cornelissen, Bester, Cairns, Tovée & Cornelissen, 2015). According to this, own body size estimation is biased towards an average reference body, such that individuals with a low body mass index (BMI) should overestimate their body size and high BMI individuals should underestimate their body size. However, previous studies have mainly focused on self-body size evaluation of patients suffering from anorexia nervosa. In this study, we tested healthy females varying in BMI to investigate whether personal body size influences accuracy of body size estimation and sensitivity to weight changes, reproducing a scenario of standing in front of a full-length mirror. We created personalized avatars with a 4D full-body scanning system that records participants’ body geometry and texture, and altered the weight of the avatars based on a statistical body model. In two psychophysical experiments, we presented the stimuli on a stereoscopic, large-screen immersive display, and asked participants to respond whether the body they saw was their own. Additionally, we used several questionnaires to assess participants’ self-esteem, eating behavior, and their attitudes towards their body shape and weight.
Our results show that participants, across the range of BMI, veridically perceived their own body size, contrary to what is suggested by the contraction bias hypothesis. Interestingly, we found that BMI influenced sensitivity to weight changes in the positive direction, such that people with higher BMIs were more willing to accept bigger bodies as their own. BMI did not influence sensitivity to weight changes in the negative direction.}, web_url = {jov.arvojournals.org/article.aspx?articleid=2551372}, event_name = {16th Annual Meeting of the Vision Sciences Society (VSS 2016)}, event_place = {St. Pete Beach, FL, USA}, state = {published}, DOI = {10.1167/16.12.1400}, author = {Thaler A{athaler}{Department Human Perception, Cognition and Action}; Geuss MN{mgeuss}{Department Human Perception, Cognition and Action}; M\"olbert SC{smoelbert}; Giel KE; Streuber S{stst}{Department Human Perception, Cognition and Action}; Black MJ{black}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Poster{ ThalerGMSGBM2016, title = {Sensitivity to Weight Changes of Others Depends on Personal Body Size}, journal = {Perception}, year = {2016}, month = {8}, day = {29}, volume = {45}, number = {ECVP Abstract Supplement}, pages = {53-54}, abstract = {Previous research has suggested that own body size estimates are biased towards an average reference body (Cornelissen, Bester, Cairns, Tovée & Cornelissen, 2015). The role of personal body size in body size perception of others is still unclear. In this study, we tested healthy females varying in body mass index (BMI) to investigate whether personal body size influenced accuracy of body size estimation and sensitivity to weight changes of others. We generated four biometric female avatars with BMIs of 15, 25, 35, and 45 and altered the weight of the avatars (5, 10, 15, and 20% BMI change) based on a statistical body model. In several psychophysical experiments, we presented the stimuli on a stereoscopic, large-screen immersive display. For each avatar series, participants memorized what the original body looked like and then responded for each of the presented bodies whether it was the same as the one memorized. Our results show that there was no influence of personal BMI on the accuracy of body size estimation of the avatars. Interestingly, however, participants were more sensitive to weight changes of an avatar close in BMI to their own, suggesting that own body size influences perception of others’ weight.}, web_url = {http://journals.sagepub.com/doi/full/10.1177/0301006616671273}, event_name = {39th European Conference on Visual Perception (ECVP 2016)}, event_place = {Barcelona, Spain}, state = {published}, DOI = {10.1177/0301006616671273}, author = {Thaler A{athaler}; Geuss MN{mgeuss}; M\"olbert SC{smoelbert}; Streuber S{stst}; Giel KE; Black MJ{black}; Mohler BJ{mohler}} } @Poster{ MolbertTMSBKZG2016_2, title = {Investigating Body Image Disturbance in Anorexia Nervosa Using Biometric Self-Avatars in Virtual Reality}, year = {2016}, month = {7}, day = {27}, abstract = {Anorexia nervosa (AN) is a serious eating disorder that is accompanied by underweight and high rates of psychological and physical comorbidity. Body image disturbance is a core symptom of AN, but as yet distinctive features of this disturbance are unknown. This study uses individual 3D-avatars in virtual reality to investigate the following questions: (1) Do women with AN differ from controls in how accurately they perceive their body weight?
(2) Do women with AN generally perceive bodies of their own shape differently than controls, or only when viewing their own body? We investigate 25 women with AN and 25 healthy controls. Based on a 3D body scan, we create individual avatars for each participant. The avatar is manipulated to represent +/- 5%, 10%, 15% and 20% of the participant’s weight. Additionally, for the control task, we manipulate the identity of the avatar using a standard texture. Avatars are presented on a stereoscopic life-size screen. In the two-alternative forced choice (2AFC) task, participants see each avatar 20 times for two seconds. After each presentation, they have to decide whether that was the correct or a manipulated avatar. In the Method of Adjustment (MoA) task, participants are asked to adjust each avatar to match both the correct size and their ideal size. In the control task, participants memorize the body with the standard texture and afterwards perform the same 2AFC and MoA tasks with respect to the memorized body. Additionally, eating pathology, body dissatisfaction and self-esteem are assessed. First results from 19 women with AN and 16 controls show a tendency of patients to be accurate or to underestimate their current body size as compared to controls. In the control task, both groups accurately memorized and estimated the avatar’s weight. Our preliminary results indicate that body image disturbance in AN is not due to a general deficit in body size perception, but is limited to one's own person and influenced by evaluation.}, web_url = {http://www.vr-workshop-tuebingen.org/Schedule.html}, event_name = {Virtual Environments: Current Topics in Psychological Research: VECTOR Workshop}, event_place = {Tübingen, Germany}, state = {published}, author = {M\"olbert S{smoelbert}; Thaler A{athaler}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}; Streuber S{stst}{Department Human Perception, Cognition and Action}; Black MJ{black}; Karnath H-O; Zipfel S; Giel KE} } @Poster{ MolbertTMSBZG2016, title = {Untersuchung der Körperbildstörung bei Anorexia Nervosa mithilfe biometrischer Avatare in virtueller Realität}, year = {2016}, month = {3}, abstract = {Background: Body image disturbance is a core symptom of anorexia nervosa (AN). It is considered an indicator of poor prognosis, is regarded as difficult to treat, and often persists even after weight gain. This study uses individual 3D avatars to investigate the following research questions: (1) Do AN patients represent body-related information differently in general, or is the body image disturbance purely self-related? (2) Is the body image disturbance primarily perceptual, or is it primarily characterized by dysfunctional evaluation? Methods: N=20 AN patients and N=20 female control participants are investigated. For each participant, an individual avatar with 9 different BMI levels is created on the basis of a 3D body scan, namely the current BMI and +/- 5%, 10%, 15% and 20% BMI. To determine the role of self-reference, a second avatar series with the appearance of an unfamiliar person is created based on the participant's figure. The avatars are presented life-size and in 3D in a virtual reality environment. The experiment follows a 2x2 mixed design with the factors group (AN versus control) and avatar series (own versus unfamiliar appearance), with two different task formats: in the "2 Alternatives Forced Choice Task", the participant sees each avatar 20 times for 2 seconds and must then decide whether it was her own (i.e., the correct) avatar or a manipulated one. In the "Method of Adjustment Task", the participant is asked to adjust each avatar so that it matches her current (i.e., the correct) body, and additionally so that it matches her ideal body. In addition, self-esteem, body dissatisfaction and eating disorder pathology are assessed in detail. Results: First results from N=14 AN patients show, for the avatars with their own appearance, a clear tendency of the patients to identify or adjust avatars thinner than their own. For avatars with the appearance of an unfamiliar person, in contrast, the AN patients were largely accurate. The N=8 control participants show a similar, but much less pronounced, pattern. Conclusion: Our preliminary results indicate that body image disturbance in AN patients is self-related and characterized above all by evaluation, and is not based on a generally different perception or processing of bodies.}, web_url = {http://www.egms.de/dynamic/en/meetings/dgess2016/index.htm?sub=1&main=2}, event_name = {5. Wissenschaftlicher Kongress der Deutschen Gesellschaft für Essstörungen e.V. (DGESS 2016)}, event_place = {Essen, Germany}, state = {published}, DOI = {10.3205/16dgess087}, author = {M\"olbert SC{smoelbert}; Thaler A{athaler}; Mohler B{mohler}; Streuber S{stst}; Black MJ{black}; Zipfel S; Giel KE} } @Poster{ BulthoffMT2015, title = {Active and passive exploration of faces}, journal = {Perception}, year = {2015}, month = {8}, volume = {44}, number = {ECVP Abstract Supplement}, pages = {51}, abstract = {In most face recognition studies, learned faces are shown without a visible body to passive participants. Here, faces were attached to a body and participants viewed them either actively or passively before their recognition performance was tested. 3D-laser scans of real faces were integrated onto sitting or standing full-bodied avatars placed in a virtual room. In the ‘active’ learning condition, participants viewed the virtual environment through a head-mounted display. Their head position was tracked to allow them to walk physically from one avatar to the next and to move their heads to look up or down at the standing or sitting avatars. In the ‘passive dynamic’ condition, participants saw a rendering of the visual explorations of the first group. In the ‘passive static’ condition, participants saw static screenshots of the upper bodies in the room. Face orientation congruency (up versus down) was manipulated at test. Faces were recognized more accurately when viewed in a familiar orientation for all learning conditions.
While active viewing in general improved performance compared to viewing static faces, passive dynamic observers and active observers, who received the same visual information, performed similarly, despite the absence of volitional movements for the passive dynamic observers.}, web_url = {http://pec.sagepub.com/content/44/1_suppl.toc}, event_name = {38th European Conference on Visual Perception (ECVP 2015)}, event_place = {Liverpool, UK}, state = {published}, DOI = {10.1177/0301006615598674}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}} } @Poster{ MeilingerFMB2014, title = {How to remember Tübingen? Reference frames in route and survey knowledge of one’s city of residency}, journal = {Cognitive Processing}, year = {2014}, month = {9}, volume = {15}, number = {Supplement 1}, pages = {S53-S54}, abstract = {Knowledge underlying everyday navigation is distinguished into route and survey knowledge (Golledge 1999). Route knowledge allows re-combining and navigating familiar routes. Survey knowledge is used for pointing to distant locations or finding novel shortcuts. We show that within one’s city of residency route and survey knowledge root in separate memories of the same environment and are represented within different reference frames. Twenty-six Tübingen residents who had lived there for seven years on average faced a photorealistic virtual model of Tübingen and completed a survey task in which they pointed to familiar target locations from various locations and orientations. Each participant’s performance was most accurate when facing north, and errors increased as participants’ deviation from a north-facing orientation increased. This suggests that participants’ survey knowledge was organized within a single, north-oriented reference frame. One week later, 23 of the same participants conducted route knowledge tasks comprising the very same start and goal locations used in the survey task before. Now participants did not point to a goal location, but used the arrow keys of a keyboard to enter route decisions along an imagined route leading to the goal. Deviations from the correct number of left, straight, etc. decisions and response latencies were completely uncorrelated with errors and latencies in pointing. This suggests that participants employed different and independent representations for the matched route and survey tasks. Furthermore, participants made fewer route errors when asked to respond from an imagined horizontal walking perspective rather than from an imagined constant aerial perspective, which replaced left, straight, and right decisions with up, left, right, and down decisions, as on a map (task order was balanced). This performance advantage suggests that participants did not rely on the single, north-up reference used for pointing. Route and survey knowledge were organized along different reference frames. We conclude that our participants’ route knowledge employed multiple local reference frames acquired from navigation whereas their survey knowledge relied on a single north-oriented reference frame learned from maps.
Within their everyday environment, people seem to use map- or navigation-based knowledge according to whichever best suits the task.}, web_url = {http://link.springer.com/content/pdf/10.1007%2Fs10339-014-0632-2.pdf}, event_name = {12th Biannual Conference of the German Cognitive Science Society (KogWis 2014)}, event_place = {Tübingen, Germany}, state = {published}, DOI = {10.1007/s10339-014-0632-2}, author = {Meilinger T{meilinger}{Department Human Perception, Cognition and Action}; Frankenstein J{frankenstein}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ PiryankovaSRdBM2014_3, title = {Can I recognize my body's weight? The influence of shape and texture on the perception of self}, year = {2014}, month = {8}, file_url = {fileadmin/user_upload/files/publications/2014/SIGGRAPH-2014-Piryankova.pdf}, web_url = {http://s2014.siggraph.org/}, event_name = {41st International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH 2014)}, event_place = {Vancouver, Canada}, state = {published}, author = {Piryankova IV{ivelina}{Department Human Perception, Cognition and Action}; Stefanucci JK{jstefanucci}{Department Human Perception, Cognition and Action}; Romero J{jromero}; de la Rosa S{delarosa}{Department Human Perception, Cognition and Action}; Black MJ{black}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Poster{ VolkovaMDTB2013, title = {Perception of emotional body expressions in narrative scenarios}, year = {2013}, month = {8}, pages = {135}, abstract = {People use body motion to express and recognise emotions. We investigated whether emotional body expressions can be recognised when they are recorded during natural narration, where actors freely express the emotional colouring of a story told. We then took only the upper body motion trajectories and presented them to participants in the form of animated stick figures. The observers were asked to categorise the emotions expressed in short motion sequences. The results show that the recognition level of eleven emotions shown via the upper body is significantly above chance level and that the responses to motion sequences are consistent across observers.}, web_url = {https://www.scss.tcd.ie/conferences/SAP2013/}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {ACM Symposium on Applied Perception (SAP '13)}, event_place = {Dublin, Ireland}, state = {published}, ISBN = {978-1-4503-2262-1}, DOI = {10.1145/2492494.2501892}, author = {Volkova EK{evolk}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Dodds T{dodds}{Department Human Perception, Cognition and Action}; Tesch J{jtesch}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ WellerdiekLVCM2013, title = {Recognizing your own motions on virtual avatars: is it me or not?}, year = {2013}, month = {8}, pages = {138}, abstract = {Point-light figures, which present motions by displaying only the moving joints of the actor, are most often used for motion recognition. In this study we were interested in whether self-recognition of motion changes with different representations. First, we captured participants' motions and remapped them onto a point-light figure and a male and a female virtual avatar.
In the second part, the same participants were asked to recognize their own motions on all three representations. We found that the recognition rate for one's own motions was high across all representations and different actions. The recognition rate was better on the point-light figure, despite it being perceived as the most difficult by the participants. The gender of the virtual avatar did not matter in self-recognition.}, web_url = {https://www.scss.tcd.ie/conferences/SAP2013/}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {ACM Symposium on Applied Perception (SAP '13)}, event_place = {Dublin, Ireland}, state = {published}, ISBN = {978-1-4503-2262-1}, DOI = {10.1145/2492494.2501895}, author = {Wellerdiek AC{awellerdiek}{Department Human Perception, Cognition and Action}; Leyrer M{leyrer}{Department Human Perception, Cognition and Action}; Volkova E{evolk}{Department Human Perception, Cognition and Action}; Chang D-S{dong}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}} } @Poster{ SaultonDTMB2013, title = {The influence of shape and culture on visual volume perception of virtual rooms}, year = {2013}, month = {8}, pages = {142}, abstract = {The ability of humans to apprehend the overall size or volume of an indoor space is not well understood. Previous research has highlighted a 'rectangularity illusion', in which rectangular rooms appear to be larger than square rooms of the same size (identical volume), showing that subjectively perceived space cannot be explained by the mathematical formula for volume, i.e. length × width × height. Instead, the results suggest that one might use the longest dimension of the space as a simplified strategy to assess room size [Sadalla and Oxley 1984].}, file_url = {fileadmin/user_upload/files/publications/2013/SAP-2013-Saulton.pdf}, web_url = {https://www.scss.tcd.ie/conferences/SAP2013/}, publisher = {ACM Press}, address = {New York, NY, USA}, event_name = {ACM Symposium on Applied Perception (SAP '13)}, event_place = {Dublin, Ireland}, state = {published}, ISBN = {978-1-4503-2262-1}, DOI = {10.1145/2492494.2501900}, author = {Saulton A{asaulton}{Department Human Perception, Cognition and Action}; Dodds TJ{dodds}{Department Human Perception, Cognition and Action}; Tesch J{jtesch}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ VolkovaMB2013, title = {Display size of biological motion stimulus influences performance in a complex emotional categorisation task}, journal = {Journal of Vision}, year = {2013}, month = {7}, volume = {13}, number = {9}, pages = {195}, abstract = {People are remarkably good at detecting familiarity with an actor (Loula et al., 2005), and at recognizing the gender (Pollick et al., 2005), emotions (Atkinson et al., 2004) and actions of an actor when presented as biological motion. For many of these tasks, the influence of the type of stimulus display (point-light display, virtual avatar, full-light video) on participants' performance has been well researched (McDonnell et al., 2009). The effect of the size of the display, however, remains underinvestigated. According to our hypothesis, a naturalistic environment and stimulus display would enhance performance, in particular for challenging tasks.
We motion-captured eight actors, who were asked to portray the following ten emotions while seated: amusement, anger, disgust, fear, joy, pride, relief, sadness, shame, and surprise. The resulting 80 motion sequences were then applied to a stick figure and used for the emotion recognition study. As a between-participant factor, the stick figure animations were presented either on a laptop screen or on a large back-projection surface. In the latter condition the size of the stick figure matched the natural size of the actors. Thirty-two participants (16 female) took part in this between-subject study. For each stimulus the participant had to make a ten-alternative forced choice to categorize the animation as one of ten emotions. Recognition accuracy was significantly higher for the natural size condition (38% accuracy for the back-projection condition vs. 31% for the desktop condition), and reaction time was lower (2.3 animation repetitions for the back-projection condition vs. 2.7 for the desktop condition). In both conditions the emotional categories were an important factor, as some emotions were more easily recognized than others. The results show that for complex tasks, e.g. discrimination among multiple emotional categories, enhanced naturalness of stimuli can be beneficial for the observer.}, web_url = {http://www.journalofvision.org/content/13/9/195.short}, event_name = {13th Annual Meeting of the Vision Sciences Society (VSS 2013)}, event_place = {Naples, FL, USA}, state = {published}, DOI = {10.1167/13.9.195}, author = {Volkova EP{evolk}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ DobrickiMB2012, title = {Onset time of visually induced circular self-motion perception as an indicator for altered self-localization in immersive virtual reality}, journal = {Journal of Vision}, year = {2012}, month = {8}, volume = {12}, number = {9}, pages = {1326}, abstract = {In the experimental induction of full-body illusions, the features of the experience of being a distinct entity (selfhood) are altered such that participants identify with and mis-localize themselves towards a virtual body. On the other hand, it has been found that the experience of circular self-motion, or vection, can be induced by rotating a naturalistic visual environment around human participants. Circular vection is likely influenced by a person’s self-localization, since it is the illusion of self-rotation around a specific location. Thus, estimates of vection may serve as indicators for altered self-localization. In a within-subjects experiment, male participants viewed an avatar from behind within a naturalistic virtual city in a head-mounted display setup. First, we stroked their back for three minutes while they watched the avatar getting synchronously and congruently stroked, or no visuo-tactile stroking was applied (stimulation factor). Subsequently, we assessed their identification with the avatar with a questionnaire, and then repeated the initial treatment. Finally, we rotated the participants’ perspective around their vertical axis for one minute. During rotation the avatar was in the same location in front of the viewer, either rotating around his axis or in a standing posture (avatar-motion factor). Participants were asked to indicate when they started to experience vection.
They reported significantly higher identification with the avatar and self-localization in the avatar’s position after visuo-tactile stimulation. Moreover, when they experienced visuo-tactile stimulation, regardless of the avatar-motion factor, participants showed a later onset of vection. One possible explanation for these results is that participants perceived themselves as partially localized in the avatar’s position, and in turn this decrease in their accuracy of self-localization delayed their experience of circular vection. Consequently, we suggest estimates of self-motion as a new measure for selfhood and embodiment, and specifically for self-localization.}, web_url = {http://www.journalofvision.org/content/12/9/1326.abstract}, event_name = {12th Annual Meeting of the Vision Sciences Society (VSS 2012)}, event_place = {Naples, FL, USA}, state = {published}, DOI = {10.1167/12.9.1326}, author = {Dobricki M{mdobricki}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ LinkenaugerMPB2012, title = {The Role of Visual Foot Size in Perceiving Object Size from Texture Gradient}, journal = {Journal of Vision}, year = {2012}, month = {8}, volume = {12}, number = {9}, pages = {902}, abstract = {The ground plane’s texture gradient is a well-known, perspective depth cue that derives from the fact that, for a uniformly textured surface, texture elements become smaller and more densely arrayed in the visual field with increasing distance, e.g. grass on a field or cobblestones on a street. This size/distance relationship also occurs for objects, such that objects of equal size occlude an equal amount of texture at their base regardless of their distance from the observer. Texture gradients have been studied primarily as a relative depth cue that specifies the size of one object relative to another. However, more definite relative scaling can be achieved if the size of texture elements is scaled to some known metric. We hypothesized that perceivers use the amount of texture occluded by their own feet to scale the sizes of objects on a textured ground. Using head-mounted displays and a motion capture system, we were able to increase or decrease the apparent size of participants’ visual feet in a virtual environment. We asked participants to verbally estimate the width and height of many objects (varying in size at the base) using meters and centimeters. As hypothesized, perceivers’ estimations of the sizes of cylinders were smaller when participants had larger virtual feet and larger when participants had smaller virtual feet.
This demonstrates that texture gradient, in combination with the visual self-located body, can be used to estimate the size of objects.}, web_url = {http://www.journalofvision.org/content/12/9/902.abstract}, event_name = {12th Annual Meeting of the Vision Sciences Society (VSS 2012)}, event_place = {Naples, FL, USA}, state = {published}, DOI = {10.1167/12.9.902}, author = {Linkenauger S{sally}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}; Proffitt D; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ VolkovaMLAB2011, title = {Contribution of Prosody in Audio-visual Integration to Emotional Perception of Virtual Characters}, journal = {i-Perception}, year = {2011}, month = {10}, day = {17}, volume = {2}, number = {8}, pages = {774}, abstract = {Recent technology provides us with realistic-looking virtual characters. Motion capture and elaborate mathematical models supply data for natural-looking, controllable facial and bodily animations. With the help of computational linguistics and artificial intelligence, we can automatically assign emotional categories to appropriate stretches of text for a simulation of those social scenarios where verbal communication is important. All this makes virtual characters a valuable tool for the creation of versatile stimuli for research on the integration of emotion information from different modalities. We conducted an audio-visual experiment to investigate the differential contributions of emotional speech and facial expressions to emotion identification. We used recorded and synthesized speech as well as dynamic virtual faces, all enhanced for seven emotional categories. The participants were asked to recognize the prevalent emotion of paired faces and audio. Results showed that when the voice was recorded, the vocalized emotion influenced participants’ emotion identification more than the facial expression. However, when the voice was synthesized, facial expression influenced participants’ emotion identification more than vocalized emotion. Additionally, individuals did worse at identifying either the facial expression or vocalized emotion when the voice was synthesized. Our experimental method can help to determine how to improve synthesized emotional speech.}, web_url = {http://i-perception.perceptionweb.com/journal/I/volume/2/article/ic774}, event_name = {12th International Multisensory Research Forum (IMRF 2011)}, event_place = {Fukuoka, Japan}, state = {published}, DOI = {10.1068/ic774}, author = {Volkova E{evolk}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}; Linkenauger S{sally}{Department Human Perception, Cognition and Action}; Alexandrova I{ivelina}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ VolkovaLABM2011, title = {Integration of Visual and Auditory Stimuli in the Perception of Emotional Expression in Virtual Characters}, journal = {Perception}, year = {2011}, month = {9}, volume = {40}, number = {ECVP Abstract Supplement}, pages = {138}, abstract = {Virtual characters are a potentially valuable tool for creating stimuli for research investigating the perception of emotion. We conducted an audio-visual experiment to investigate the effectiveness of our stimuli in conveying the intended emotion.
We used dynamic virtual faces in addition to pre-recorded (Burkhardt et al, 2005, Interspeech'2005, 1517–1520) and synthesized speech to create audio-visual stimuli covering all possible face-voice combinations. Each voice and face stimulus aimed to express one of seven different emotional categories. The participants made judgments of the prevalent emotion. For the pre-recorded voice, the vocalized emotion influenced participants’ emotion judgment more than the facial expression. However, for the synthesized voice, facial expression influenced participants’ emotion judgment more than vocalized emotion. While participants rather accurately labeled (>76%) the stimuli when face and voice emotion were the same, they performed worse overall at correctly identifying the stimuli when the voice was synthesized. We further analyzed the difference between the emotional categories in each stimulus and found that valence distance between the emotion of the face and voice significantly impacted the emotion judgment for both natural and synthesized voices. This experimental design provides a method to improve virtual character emotional expression.}, web_url = {http://pec.sagepub.com/content/40/1_suppl.toc}, event_name = {34th European Conference on Visual Perception}, event_place = {Toulouse, France}, state = {published}, DOI = {10.1177/03010066110400S102}, author = {Volkova E{evolk}{Department Human Perception, Cognition and Action}; Linkenauger S{sally}{Department Human Perception, Cognition and Action}; Alexandrova I{ivelina}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}} } @Poster{ LeyrerLBKM2011_3, title = {Perception of the size of self and the surrounding visual world in immersive virtual environments}, journal = {Perception}, year = {2011}, month = {9}, volume = {40}, number = {ECVP Abstract Supplement}, pages = {209}, abstract = {Newer technology allows for more realistic virtual environments by providing visual image quality that is very similar to that in the real world; this includes adding virtual self-animated avatars [Slater et al, 2010 PLoS ONE 5(5); Sanchez-Vives et al, 2010 PLoS ONE 5(4)]. To investigate the influence of relative size changes between the visual environment and the visual body, we immersed participants into a full-cue virtual environment where they viewed a self-animated avatar from behind and at the same eye-height as the avatar. We systematically manipulated the size of the avatar and the size of the virtual room (which included familiar objects). Both before and after exposure to the virtual room and body, participants performed an action-based measurement and made verbal estimates about the size of self and the world. Additionally, we measured their subjective sense of body ownership. The results indicate that the size of the self-representing avatar can change how the user perceives and interacts within the virtual environment.
These results have implications for scientists interested in visual space perception and could also potentially be useful for creating positive visual illusions (i.e., the feeling of being in a more spacious room).}, web_url = {http://pec.sagepub.com/content/40/1_suppl.toc}, event_name = {34th European Conference on Visual Perception}, event_place = {Toulouse, France}, state = {published}, DOI = {10.1177/03010066110400S102}, author = {Leyrer M{leyrer}{Department Human Perception, Cognition and Action}; Linkenauger SA{sally}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Kloos U; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Poster{ LeyrerLBKM2011_2, title = {The influence of a scaled third-person animated avatar on perception and action in virtual environments}, journal = {Journal of Vision}, year = {2011}, month = {9}, volume = {11}, number = {11}, pages = {69}, abstract = {Newer technology is allowing virtual environments to become more realistic by providing visual image quality that is very similar to that in the real world. Regardless, egocentric distance estimates in virtual reality have been shown to be underestimated (Thompson et al., 2004). Interestingly, this underestimation decreases after individuals view self-representing avatars in the virtual environment, especially when the avatars are self-animated (Mohler et al., 2010). These findings support perspectives on embodied perception which assert that the body and its action capabilities can act as a “perceptual ruler” that the perceiver uses to scale the world. To test this perspective, we immersed participants into a full-cue virtual environment where they viewed a self-animated avatar from behind, at a distance of 3.5 m and at the same eye-height as the avatar. We manipulated the relationship between the size of the avatar and the size of the virtual room (which included familiar objects) to see if participants would attribute these changes either to the size of the world or to the size of their body. Participants made verbal estimates about the size of self and the world and performed a walking-in-place task. We found that participants verbally attributed the apparent size difference to the virtual world and not to the self, which suggests that space perception is grounded in the physical body. Further, we found an influence of condition on the post/pre walking-in-place drift, suggesting that the participants felt embodied in the third-person animated avatar.
Further research needs to be conducted in order to fully understand the relative importance of visual cues about self, such as motion coupling, eye-height and distance of the avatar from the observer, for perception and action in virtual worlds.}, web_url = {http://www.journalofvision.org/content/11/11/69.abstract}, event_name = {11th Annual Meeting of the Vision Sciences Society (VSS 2011)}, event_place = {Naples, FL, USA}, state = {published}, DOI = {10.1167/11.11.69}, author = {Leyrer M{leyrer}{Department Human Perception, Cognition and Action}; Linkenauger SA{sally}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Kloos U; Mohler B{mohler}{Department Human Perception, Cognition and Action}} } @Poster{ BulthoffSMT2011, title = {Using avatars to explore height/pitch effects when learning new faces}, journal = {Journal of Vision}, year = {2011}, month = {9}, volume = {11}, number = {11}, pages = {596}, abstract = {In a previous series of desktop experiments we found no evidence that individuals' height influenced their representation of others' faces or their ability to process faces viewed from above or below (VSS 2009). However, in those experiments face orientation and body height were ambiguous, as isolated faces were shown on a computer screen to an observer sitting on a chair. To address those concerns and to specifically examine the influence of learned viewpoint, we created a virtual museum containing 20 full-bodied avatars (statues) that were either sitting or standing. Using a head-mounted display, observers walked through this virtual space three times, approached each statue and viewed it from any horizontal (yaw) angle without time restrictions. We equated eye-level, and thus simulated height, for all participants and restricted their vertical movement to ensure that the faces of sitting avatars were always viewed from above and standing avatars from below. After familiarization, recognition was tested using a standard old-new paradigm in which 2D images of the learnt faces were shown from various viewpoints. Results showed a clear influence of learned viewpoint. Faces that had been learned from above (below) were recognized more quickly and accurately in that orientation than from the opposite orientation. Thus, recognition of specific, newly learned faces appears to be view-dependent in terms of pitch angle.
Our failure to find a height effect in our previous study suggests that the variety of views of human faces experienced during a lifetime, and possibly the preponderance of conversational situations between humans at close range, typically counteract any influence that body size might have on a person's viewing experience of others' faces.}, web_url = {http://www.journalofvision.org/content/11/11/596.abstract}, event_name = {11th Annual Meeting of the Vision Sciences Society (VSS 2011)}, event_place = {Naples, FL, USA}, state = {published}, DOI = {10.1167/11.11.596}, author = {B\"ulthoff I{isa}{Department Human Perception, Cognition and Action}; Shrimpton S{sezys}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Thornton IM{ian}{Department Human Perception, Cognition and Action}} } @Poster{ LinkenaugerMB2011, title = {Welcome to Wonderland: The Apparent Size of the Body Influences Perceived Extents in Virtual Environments}, journal = {Journal of Vision}, year = {2011}, month = {9}, volume = {11}, number = {11}, pages = {70}, abstract = {According to the functional approach to the perception of spatial layout, angular optic variables that indicate extents are scaled to the action capabilities of the body (see Proffitt, 2006, POPS, for a review). For example, reachable extents are perceived as a proportion of the maximum extent to which one can reach, and the apparent sizes of graspable objects are perceived as a proportion of the maximum extent that one can grasp (Linkenauger et al., 2009, JEP:HPP; Linkenauger, Ramenzoni, & Proffitt, 2010, Psychol Sci; Witt, Proffitt, & Epstein, 2005, JEP:HPP). Therefore, apparent distances and sizes can be influenced by changing the action capabilities of the body. In order to directly manipulate the perceived action capabilities of the body, participants were immersed into a full-cue virtual environment. In real time, participants' hand, arm, and head movements were mapped onto a self-avatar which the participant viewed from the first-person perspective via a head-mounted display. To manipulate perceived action capabilities, the apparent size of the participants' hand was altered by decreasing or increasing the size of the self-avatar's virtual hand (small, normal, and large). Participants estimated the sizes of various objects in the virtual environment. Participants perceived objects to be larger when their virtual hand was smaller and perceived objects to be smaller when their virtual hand was larger. Consistent with the functional approach, the differences in apparent size across the conditions increased as a function of object size, suggesting changes in the scaling metric rather than a constant bias.}, web_url = {http://www.journalofvision.org/content/11/11/70.abstract}, event_name = {11th Annual Meeting of the Vision Sciences Society (VSS 2011)}, event_place = {Naples, FL, USA}, state = {published}, DOI = {10.1167/11.11.70}, author = {Linkenauger SA{sally}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 6755, title = {Velocity-dependent curvature gain and avatar use for Redirected Walking}, year = {2010}, month = {10}, pages = {1-2}, abstract = {We investigated whether humans’ sensitivity to curved walking is affected by their walking velocity.
Redirecting users of an immersive virtual environment along a curved path is one of the techniques known as 'Redirected Walking'. We conducted an experiment in which 12 participants walked specific curvatures at given speeds in VR. We found that people are significantly less sensitive to walking on a curve when walking slower. Moreover, we consider the possibility of using avatars to support redirection algorithms, since Llobera et al. [LSRS10] showed that proxemics also holds true for avatars in virtual environments. In this work, we outline three possible ways in which avatars could be used to achieve better redirection.}, file_url = {/fileadmin/user_upload/files/publications/JVRC_Manuscript_[0].pdf}, web_url = {http://www.interaction-design.org/references/conferences/proceedings_of_the_joint_virtual_reality_conference_of_egve_-_eurovr_-_vec.html}, editor = {Kuhlen, T., S. Coquillart, V. Interrante}, publisher = {Eurographics Association}, address = {Goslar, Germany}, booktitle = {Virtual Environments 2010}, event_name = {2010 Joint Virtual Reality Conference of EuroVR - EGVE - VEC (JVRC 2010)}, event_place = {Stuttgart, Germany}, state = {published}, ISBN = {978-3-905674-30-9}, author = {Neth CT{neth}{Department Human Perception, Cognition and Action}; Souman JL{souman}{Department Human Perception, Cognition and Action}; Engel D{engel}{Department Human Perception, Cognition and Action}; Kloos U; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Poster{ WallravenSMVAVP2010, title = {Understanding Objects and Actions: a VR Experiment}, year = {2010}, month = {9}, pages = {1-2}, abstract = {The human capability to interpret actions and to recognize objects is still far ahead of that of any technical system. Thus, a deeper understanding of how humans are able to interpret human (inter)actions lies at the core of building better artificial cognitive systems. Here, we present results from a first series of perceptual experiments that show how humans are able to infer scenario classes, as well as individual actions and objects, from computer animations of everyday situations.
The animations were created from a unique corpus of real-life recordings made in the European project POETICON, using motion-capture technology and advanced VR programming that allowed for full control over all aspects of the final rendered data.}, file_url = {fileadmin/user_upload/files/publications/2011/JVRC-2010-Wallraven.pdf}, web_url = {http://www.interaction-design.org/references/conferences/proceedings_of_the_joint_virtual_reality_conference_of_egve_-_eurovr_-_vec.html}, event_name = {2010 Joint Virtual Reality Conference of EuroVR - EGVE - VEC (JVRC 2010)}, event_place = {Stuttgart, Germany}, state = {published}, author = {Wallraven C{walli}{Department Human Perception, Cognition and Action}; Schultze M{mschultze}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}; Volkova E{evolk}{Department Human Perception, Cognition and Action}; Alexandrova I{ivelina}{Department Human Perception, Cognition and Action}; Vatakis A{vatakis}{Department Human Perception, Cognition and Action}; Pastra K} } @Poster{ 6997, title = {Changing our perception of communication in virtual environments}, journal = {Perception}, year = {2010}, month = {8}, volume = {39}, number = {ECVP Abstract Supplement}, pages = {183}, abstract = {When people communicate face-to-face they use gestures and body language that naturally coincide with speech [McNeill, 2007, Gesture & Thought, University of Chicago Press.]. In an immersive virtual environment (VE) we can control both participants' visual feedback of self and of the other in order to investigate the effect of gestures on a communication task. In our experiment the communication task is to make the listener say a word without the speaker saying the word. We use animated real-time self-avatars in immersive VEs to answer the question: `Does the use of naturalistic gestures help communication in VEs'. Specifically, we perform a within-subject experiment which investigates the influence of first- and third-person perspectives, and of an animated speaker and listener. We find that people perform significantly better in the communication task when both the speaker and listener have an animated self-avatar and when the camera for the speaker shows a third-person perspective. When participants moved more they also performed better in the task. These results suggest that when two people in a VE are animated they do use gestures to communicate. These results demonstrate that in addition to the speaker movements, the listener movements are important for efficient communication in an immersive VE.
We further investigate whether people underestimate egocentric distances in a large screen immersive display. In our experiment, participants were asked to report verbal estimates of egocentric distances in a large screen display with floor projection or in the real world. Overall, in the virtual world we found an underestimation of distances by 17% as compared to near accurate performance in the real world. Moreover, in the virtual world there was an effect of distance, which indicated overestimation for distances closer than the screen (3.5 meters), while for distances past the physical screen there was underestimation. To determine the full reason for these effects, further analysis is needed. Our results demonstrate that egocentric distances are also underestimated in a large screen immersive display as compared to the real world.}, file_url = {/fileadmin/user_upload/files/publications/Alexandrova_JVRC_authors_version_6624[0].pdf}, web_url = {http://pec.sagepub.com/content/39/1_suppl.toc}, event_name = {33rd European Conference on Visual Perception}, event_place = {Lausanne, Switzerland}, state = {published}, DOI = {10.1177/03010066100390S101}, author = {Alexandrova IV{ivelina}{Department Human Perception, Cognition and Action}; Teneva PT{pteneva}{Department Human Perception, Cognition and Action}; Kloos U; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Poster{ 6550, title = {The effect of walking speed on the sensitivity to curved walking in an immersive Virtual Environment}, journal = {Perception}, year = {2010}, month = {8}, volume = {39}, number = {ECVP Abstract Supplement}, pages = {96}, abstract = {People are relatively insensitive to the curvature of their walking trajectory [Kallie et al., 2007, JEP:HPP, 33(1), 183-200]. This is exploited in the "Redirected Walking" technique which is used in Virtual Reality to extend the borders of Virtual Environments (VE) beyond the size of the physical walking area [Steinicke et al., 2009, Journal of Virtual Reality and Broadcasting, 6(2009)]. One method is to slowly rotate the VE while the user is aiming to walk a straight path, inducing him/her to unknowingly walk on a curved trajectory. We tested whether the sensitivity to curvature depends on walking speed. Participants followed a virtual sphere in a VE, which moved on a straight path. During walking, the entire visual scene was rotated, creating a curved real-world trajectory (radius 20-200m). Walking speed was 0.75, 1.0, or 1.25 m/s. Participants indicated whether their physical walking path curved to the left or right. Discrimination thresholds were estimated by fitting a psychometric function to the proportion of trials in which the trajectory was reported to curve to the left. Curvature thresholds were found to be higher for slow walking.
This suggests that the effectiveness of the redirected walking technique depends on walking speed.}, file_url = {/fileadmin/user_upload/files/publications/Abstract%20ECVP10_[0].pdf}, web_url = {http://pec.sagepub.com/content/39/1_suppl.toc}, event_name = {33rd European Conference on Visual Perception}, event_place = {Lausanne, Switzerland}, state = {published}, DOI = {10.1177/03010066100390S101}, author = {Neth C{neth}{Department Human Perception, Cognition and Action}; Souman JL{souman}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Kloos U; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Poster{ 6537, title = {The impact of an animated avatar on egocentric distance perception in an immersive virtual environment}, journal = {Perception}, year = {2010}, month = {8}, volume = {39}, number = {ECVP Abstract Supplement}, pages = {51}, abstract = {To date, few head-mounted display (HMD) virtual environment systems display a rendering of the user’s own body. Subjectively, this often leads to a sense of disembodiment in the VE. In a recent study, we found that experience with a self-avatar changed the typical pattern of distance underestimation seen in many HMD studies (Mohler et al., in press, Presence). Users showed an increase in distance estimations with avatar experience, especially when the avatar was animated in correspondence with their own body-movements. The effect occurred for both co-located self-avatars and self-avatars viewed from the third person perspective. The current study investigated the importance of the degree to which self-avatar animation reflected the actual movements of the user. We compared distance judgments with a third-person perspective view of a self-avatar that was either controlled by user motions or was animated based on pre-recorded motion data. The results suggest that experience with an animated avatar, even if not in correspondence with a user’s own body movements, increases distance estimates. The magnitude of this effect will be further examined with additional participants.}, web_url = {http://pec.sagepub.com/content/39/1_suppl.toc}, event_name = {33rd European Conference on Visual Perception}, event_place = {Lausanne, Switzerland}, state = {published}, DOI = {10.1177/03010066100390S101}, author = {Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Creem-Regehr SH; Thompson WB{wthompson}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ VolkovaABM2010, title = {Virtual storytelling of fairy tales: Towards simulation of emotional perception of text}, journal = {Perception}, year = {2010}, month = {8}, volume = {39}, number = {ECVP Abstract Supplement}, pages = {31}, abstract = {Emotion analysis (EA) is a rapidly developing area in computational linguistics. For most EA systems, the number of emotion classes is very limited and the text units the classes are assigned to are discrete and predefined. The question we address is whether the set of emotion categories can be enriched and whether the units to which the categories are assigned can be more flexibly defined. Six untrained participants annotated a corpus of eight texts, with no predetermined annotation units, using fifteen emotional categories. The inter-annotator agreement rates were remarkably high for this difficult task: 0.55 (moderate) on average, reaching 0.82 (almost perfect) with some annotator pairs.
The final application of the intended EA system is predominantly in the emotion enhancement of human–computer interaction in virtual reality. The system is meant to be a bridge between unprocessed input text and auditory and visual information: generated speech, animation of facial expressions and body language. The first steps towards integrating text-based information annotated for emotion categories and simulation of human emotional perception of texts in storytelling scenarios for virtual reality have already been made. We have created a virtual character whose animation of face and body is driven by annotations in the text.}, web_url = {http://pec.sagepub.com/content/39/1_suppl.toc}, event_name = {33rd European Conference on Visual Perception}, event_place = {Lausanne, Switzerland}, state = {published}, DOI = {10.1177/03010066100390S101}, author = {Volkova EP{evolk}{Department Human Perception, Cognition and Action}; Alexandrova IV{ivelina}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Poster{ 5908, title = {Distance and alignment effects in survey knowledge of a highly familiar city}, journal = {Perception}, year = {2009}, month = {8}, volume = {38}, number = {ECVP Abstract Supplement}, pages = {63}, abstract = {In this experiment we examined alignment and distance effects in human memory for a highly familiar environmental space. Twenty-seven participants, who had lived in Tübingen for seven years on average, saw a photorealistic virtual model of the city centre of Tübingen (Virtual Tübingen) through a head-mounted display. They were teleported to five different places in Virtual Tübingen and asked to point towards well-known target locations. This procedure was repeated 36 times for each of the target locations in 12 different body orientations. Participants pointed much more accurately when oriented northwards regardless of target. There were no significant correlations between straight line distance to the pointing target and pointing speed or accuracy. These results are consistent with the assumption that all locations were represented within one oriented coordinate system. Even though this is predicted by reference direction theory, it is unclear, first, why almost all participants have the same reference direction and, second, why this direction is north. We discuss our results with respect to well-known theories of spatial memory and speculate that the bias for a north orientation is because participants rely on the memory of a map of Tübingen for their response.}, web_url = {http://pec.sagepub.com/content/38/1_suppl.toc}, event_name = {32nd European Conference on Visual Perception}, event_place = {Regensburg, Germany}, state = {published}, DOI = {10.1177/03010066090380S101}, author = {Frankenstein J{frankenstein}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}; Meilinger T{meilinger}{Department Human Perception, Cognition and Action}} } @Poster{ 5474, title = {Multimodal integration in the estimation of walked distances}, year = {2008}, month = {7}, volume = {9}, number = {163}, pages = {83}, abstract = {When walking through space, both dynamic visual information (i.e.
optic flow), and body-based information (i.e., proprioceptive/efference copy and vestibular) jointly specify the magnitude of a distance travelled. While recent evidence has demonstrated the extent to which each of these cues can be used independently, relatively little is known about how they are integrated when simultaneously present. In this series of experiments, participants first travelled along a predefined distance and subsequently matched this distance by adjusting an egocentric, in-depth target. Visual information was presented via a head-mounted display and consisted of a long, richly textured, virtual hallway. Body-based cues were provided either by walking in a fully-tracked, free-walking space or by walking on a large, linear treadmill. Travelled distances were provided either through optic flow alone, body-based cues alone (i.e. blindfolded walking), or through both cues combined. In the combined condition, visually-specified distances were either congruent (1.0x) or incongruent (0.7x or 1.4x) with distances specified by body-based cues. The incongruencies were introduced either by changing the visual gain during natural walking or the proprioceptive gain during treadmill walking. Responses reflect a combined effect of both visual and body-based information, with an overall higher influence of body-based cues.}, web_url = {http://imrf.mcmaster.ca/IMRF/2008/pdf/FullProgramIMRF08.pdf}, event_name = {9th International Multisensory Research Forum (IMRF 2008)}, event_place = {Hamburg, Germany}, state = {published}, author = {Campos J{camposjl}{Department Human Perception, Cognition and Action}; Butler J{butler}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 5137, title = {High-precision capture of perceived velocity during passive translations}, journal = {Journal of Vision}, year = {2008}, month = {6}, volume = {8}, number = {6}, pages = {1043}, abstract = {Although self-motion perception is believed to rely heavily on visual cues, the inertial system also provides valuable information about movement through space. How the brain integrates inertial signals to update position can be better understood through a detailed characterization of self-motion perception during passive transport. In this study, we employed an intuitive method for measuring the perception of self-motion in real-world coordinates. Participants were passively translated by a robotic wheelchair in the absence of visual and auditory cues. The traveled trajectories consisted of twelve straight paths, five to six meters in length, each with a unique velocity profile. As participants moved, they pointed continuously toward a stationary target viewed at the beginning of each trial. By using an optical tracking system to measure the position of a hand-held pointing device, we were able to calculate participants' perceived locations with a high degree of spatial and temporal precision. Differentiating perceived location yielded absolute instantaneous perceived velocity (in units of meters per second), a variable that, to the best of our knowledge, has not previously been measured. Results indicate that pointing behavior is updated as a function of changes in wheelchair velocity, and that this behavior reflects differences in starting position relative to the target. 
During periods of constant, nonzero velocity, the perceived velocity of all participants decreases systematically over the course of the trajectory. This suggests that the inertial signal is integrated in a leaky fashion, even during the relatively short paths used in this experiment. This methodology allows us to characterize such nonveridical aspects of self-motion perception with more precision than has been achieved in the past. The continuous-pointing paradigm used here can also be effectively adapted for use in other research domains, including spatial updating, vection, and visual-vestibular integration.}, file_url = {/fileadmin/user_upload/files/publications/VSS%20Poster%20Final%20Small_[0].pdf}, web_url = {http://www.journalofvision.org/8/6/1043/}, event_name = {8th Annual Meeting of the Vision Sciences Society (VSS 2008)}, event_place = {Naples, FL, USA}, state = {published}, DOI = {10.1167/8.6.1043}, author = {Siegle JH{jsiegle}; Campos JL{camposjl}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Loomis JM{loomis}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 5462, title = {Imagined self-motion differs from perceived self-motion}, journal = {Journal of Vision}, year = {2008}, month = {6}, volume = {8}, number = {6}, pages = {1147}, abstract = {Self-motion perception refers to the ability to perceive the speed and direction of movement through space. Past measures of self-motion perception have failed to directly assess the dynamic, instantaneous properties of perceived self-motion in real-world coordinates. Here we developed a novel continuous pointing method to measure perceived self-motion during translational movements. This experiment took place in a large, fully tracked, free-walking space. Participants viewed a target and then, with eyes closed, attempted to point continuously towards the target as they moved past it along a straight, forward trajectory. Pointing behaviour was tracked using a high-precision optical tracking system which monitored a hand-held pointing device. By using arm angle, we continuously measured participants' perceived location and, hence, perceived self-velocity during the entire trajectory. We compared the natural characteristics of continuous pointing in a control condition (sighted walking) with that during conditions in which particular sensory/motor cues were reduced, including: blind walking, passive transport, and imagined walking in the complete absence of physical movement. Results demonstrate that under all reduced cue conditions involving actual movement, perceived self-velocity and displacement were relatively accurate. Specifically, the pattern of pointing in the blind walking condition did not differ from that of the passive transport condition. This indicates that, for simple, linear trajectories with a raised-cosine velocity profile, inertial cues alone can be used to perceive self-motion. Perhaps most interestingly, the “signature” pattern of pointing observed during true self-motion (notably an increase in arm azimuth velocity upon target approach) was absent during imagined pointing. Consequently, continuous pointing reveals a characteristic arm trajectory that is unique to actual self-motion. This appears to be an automatic, obligatory process that is not reproduced during a purely cognitive representation of self-motion in the absence of movement. 
This method has direct implications for several research areas, including spatial cognition and navigation.}, web_url = {http://www.journalofvision.org/8/6/1147/}, event_name = {8th Annual Meeting of the Vision Sciences Society (VSS 2008)}, event_place = {Naples, FL, USA}, state = {published}, DOI = {10.1167/8.6.1147}, author = {Campos JL{camposjl}{Department Human Perception, Cognition and Action}; Siegle J{jsiegle}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Loomis JM{loomis}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 5525, title = {Exploring a large maze in a limited size free-walking space}, year = {2008}, month = {4}, web_url = {http://www.cyberwalk-project.org/}, event_name = {Cyberwalk Workshop 2008}, event_place = {Tübingen, Germany}, state = {published}, author = {Engel D{engel}{Department Human Perception, Cognition and Action}; Curio C{curio}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 5107, title = {High-precision capture of perceived self-motion with a continuous-pointing paradigm}, year = {2008}, month = {4}, file_url = {/fileadmin/user_upload/files/publications/CyberWalk%20Poster%20Small_[0].pdf}, web_url = {http://www.cyberwalk-project.org/}, event_name = {Cyberwalk Workshop 2008}, event_place = {Tübingen, Germany}, state = {published}, author = {Siegle JH{jsiegle}; Campos JL{camposjl}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Loomis JM{loomis}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 5461, title = {Visual, proprioceptive, and inertial cue-weighting in travelled distance perception}, year = {2008}, month = {4}, web_url = {http://www.cyberwalk-project.org/}, event_name = {Cyberwalk Workshop 2008}, event_place = {Tübingen, Germany}, state = {published}, author = {Campos JL{camposjl}{Department Human Perception, Cognition and Action}; Butler JS{butler}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 4545, title = {Gait parameter differences within an HMD as compared to the real world}, year = {2007}, month = {7}, volume = {10}, pages = {125}, abstract = {It has been shown that virtual environment (VE) users make systematic errors of distance compression when acting on or judging a virtual space (blind-walking to targets on the ground plane or verbal reports [1,3]). This bias in behavior can, in part, be explained by the mechanics of the head-mounted display (HMD). Willemsen et al. [3] have developed a modified HMD in which the visual display has been removed and yet weight distribution is kept consistent with that of a functional HMD. When participants view the real world through this modified HMD, they undershoot when blind walking to targets. Willemsen et al.'s research suggests that the weight or ergonomics of the HMD influences the distance traversed while performing a blind walking task [2]. In the current research, we consider four different conditions: walking with eyes closed within the real world, eyes closed wearing an HMD, eyes open in the real world, and eyes open wearing an HMD.
By investigating these four conditions we can assess whether there are differences in gait parameters due to the physical constraints of the HMD and/or due to the differences between the visual experience in the HMD and the real world. Full-body motion tracking data was collected for six participants while they walked to a previously seen target at 8 randomly ordered distances (3, 4, 5, 6, 7, 8, 9 and 10 meters). We report three gait parameters for each of these four conditions: stride length, walking velocity, and head-trunk angle. These data reveal that gait parameters within an HMD VE differ from those in the real world. A person wearing an HMD and backpack walks more slowly and takes shorter strides than in a comparable real-world condition. In addition, head-trunk angle while walking to a target on the ground plane is lowest when walking with eyes open while wearing an HMD. While future research should investigate the influence of gait parameters on human perception of the active observer, the sole objective of the current research was to analyze the differences between gait parameters while walking within an HMD and in the real world.}, web_url = {http://twk.tuebingen.mpg.de/twk07/abstract.php?_load_id=mohler01}, event_name = {10th Tübinger Wahrnehmungskonferenz (TWK 2007)}, event_place = {Tübingen, Germany}, state = {published}, author = {Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Campos JL{camposjl}{Department Human Perception, Cognition and Action}; Weyel MB{weyel}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Poster{ 4648, title = {Visually mismatched feedback within a head-mounted display affects a perceptual-motor but not a cognitive real world egocentric distance response}, journal = {Journal of Vision}, year = {2007}, month = {6}, volume = {7}, number = {9}, pages = {413}, abstract = {We describe an experiment in which mismatches between actual walking speed and visual indicators of the speed of self-motion within a head-mounted display (HMD) alter a perceptual-motor but not a cognitive indication of egocentric distance perception in the real world. We conducted a within-subject design experiment (pre- and post-test) that had four between-subject conditions: 2 HMD feedback conditions (visually faster, visually slower) × 2 response measures in the real world (blind walking to previously viewed targets, verbal reports). While pre-tests revealed accurate real world direct blind walking performance, visually faster and slower post-tests showed an 11% undershoot and 17% overshoot, respectively. Verbal reports in the real world were approximately 81% of the actual distance both before and after HMD feedback. We previously found that verbal reports and blind walking in an HMD, which without feedback are both compressed, became near accurate after matched continuous visual feedback was given within the HMD. The same experience within the HMD had little or no effect on either real world blind walking or real-world verbal reports of distance (Mohler et al., 2006, APGV).
The finding from this previous experiment, that both perceptual-motor and cognitive responses within the HMD were affected by feedback that was predominantly perceptual-motor in nature, suggests one of three explanations: (1) a common cognitive rule is applied across responses, (2) perceptual-motor and cognitive responses are adapted using different mechanisms producing similar changes, or (3) adaptation produces a general change in the perception of space. The current results, showing a differential effect on two response measures in the real world, one perceptual-motor and the other cognitive, argue against an explanation exclusively involving changes in space perception and support an account in which cognitive effects, perhaps combined with perceptual-motor adaptation, play a role.}, web_url = {http://journalofvision.org/7/9/413/}, event_name = {7th Annual Meeting of the Vision Sciences Society (VSS 2007)}, event_place = {Sarasota, FL, USA}, state = {published}, DOI = {10.1167/7.9.413}, author = {Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; Creem-Regehr SH; Thompson WB} } @Poster{ 4704, title = {Absolute egocentric distance judgments are improved after motor and cognitive adaptation within HMD}, journal = {Journal of Vision}, year = {2006}, month = {6}, volume = {6}, number = {6}, pages = {725}, abstract = {In full-cue, real-world environments, people are accurate at visually-directed actions to targets on the ground. The same distance estimation tasks conducted in virtual environments using head-mounted display (HMD) systems show that people act as though the environment is smaller than intended. The explanation for this difference between actions in real and virtual environments is unknown. Our current study investigated influences of adaptation within the HMD on subsequent distance judgments. Two forms of feedback were evaluated using two distinctly different response measures. In a pre-test, subjects indicated egocentric distances to targets on the floor using one of two measures: blind walking or verbal reports. In the adaptation period, they experienced one of two interventions. A visual-motor intervention involved the continuous visual and motor feedback of walking with eyes open to previously viewed targets. A non-visual-motor intervention involved walking with eyes closed until the subjects were told that they reached the previously viewed target. After this adaptation period, subjects completed a post-test in which they performed the same task as in the pre-test. Egocentric distances were underestimated by approximately 30% for both tasks during the pre-test, consistent with previous results. Notably, subjects became significantly more accurate at both blind-walking to targets and verbal reports of distance after both adaptation experiences.
The results indicate that both cognitive and motor indications of distance can be recalibrated in the HMD and suggest that recalibration can be caused by effects other than the interaction of sensory modalities indicating the speed of travel.}, web_url = {http://www.journalofvision.org/6/6/725/}, event_name = {6th Annual Meeting of the Vision Sciences Society (VSS 2006)}, event_place = {Sarasota, FL, USA}, state = {published}, DOI = {10.1167/6.6.725}, author = {Mohler B{mohler}{Department Human Perception, Cognition and Action}; Thompson WB; Creem-Regehr SH} } @Poster{ 4705, title = {Does perceptual-motor recalibration of locomotion depend on perceived self motion or the magnitude of optical flow?}, journal = {Journal of Vision}, year = {2005}, month = {9}, volume = {5}, number = {8}, pages = {386}, abstract = {The perceptual-motor calibration of human locomotion can be manipulated by exposure to an environment in which the visual flow associated with self-motion is altered relative to biomechanical walking speed (Rieser, Pick, Ashmead and Garing 1995, JEP:HPP). An open question remains as to whether this recalibration is based on perception of the speed of movement through the world or on the magnitude of optic flow itself. We addressed this issue using a treadmill-based virtual environment in which we could independently vary actual walking speed and the simulated visual experience of moving down a hallway. The hallway consisted of textured walls and textureless floor and ceiling, so that visual flow information was only available from the walls. Subjects were exposed to one of two conditions. Actual walking speed was 1.2m/s in both cases. In one condition, visual information corresponded to movement down a long hallway at a speed one third less than the biomechanical rate of walking. In the second condition, the visual information corresponded to movement three times faster than in the first condition, down a hallway that was three times larger. Because the scale of the space was increased by the same amount as the increase in velocity through the space, the magnitude of optic flow remained essentially constant, though flow due to the walls moved upward in the visual field. Perceptual-motor calibration was evaluated by having subjects walk blindfolded to previously viewed targets at 6, 8 and 10m before and after 10 minutes of walking on the treadmill. For the visually slower condition, subjects increased the distance they walked by an average of 10% between the pre- and post-tests. For the visually faster condition, subjects decreased the distance they walked by an average of 3%. These differences demonstrate that the recalibration depended at least in part on visual perception of the speed of self-movement, not just on the magnitude of optic flow.}, web_url = {http://www.journalofvision.org/5/8/386/}, event_name = {Fifth Annual Meeting of the Vision Sciences Society (VSS 2005)}, event_place = {Sarasota, FL, USA}, state = {published}, DOI = {10.1167/5.8.386}, author = {Thompson WB; Mohler B{mohler}{Department Human Perception, Cognition and Action}; Creem-Regehr SH} } @Poster{ 4706, title = {Speed of visual flow affects comfortable walking speed}, journal = {Journal of Vision}, year = {2005}, month = {9}, volume = {5}, number = {8}, pages = {306}, abstract = {When a person is instructed to walk at a comfortable speed, their actual walking speed is influenced by the velocity of visual flow that they experience.
This is a surprising result, given that “walking comfortably” would seem to depend on purely biomechanical factors. To demonstrate this result, we utilized a computer-controlled treadmill with a 6′ by 10′ walking surface, surrounded by three 8′ by 8′ projection screens oriented to provide an approximately 180 degree horizontal field of view. Users were able to walk at any speed of their choosing on the treadmill, with the speed of the treadmill belt automatically adjusting as needed. Ten subjects participated. Each subject had three minutes to gain familiarity with the user-controlled treadmill. Following this, each subject was directed to walk comfortably in three different conditions: visually slower (0.5X), visually same (1.0X) and visually faster (2.0X) than their walking speed. The practice and each of the three conditions on the treadmill were separated by five minutes of walking around in an actual hallway. The conditions were randomly ordered for each subject. Subjects on average chose 1.41 m/s for the visually slower condition, 1.21 m/s for the visually faster condition and 1.29 m/s for the visually same condition. This indicates that subjects use the speed of the visual flow when deciding their own comfortable walking speed. The result is consistent with a previous finding of ours, using the same treadmill but with the belt speed under computer control, that the speed at which walk/run and run/walk transitions occur is also affected by the speed of visual flow (Mohler et al., 2004, SIGGRAPH-APGV).}, web_url = {http://www.journalofvision.org/5/8/306/}, event_name = {Fifth Annual Meeting of the Vision Sciences Society (VSS 2005)}, event_place = {Sarasota, FL, USA}, state = {published}, DOI = {10.1167/5.8.306}, author = {Mohler B{mohler}{Department Human Perception, Cognition and Action}; Creem-Regehr SH; Thompson WB} } @Poster{ 4707, title = {Perceived Slant is Greater from Far versus Near Distances}, journal = {Journal of Vision}, year = {2004}, month = {8}, volume = {4}, number = {8}, pages = {374}, abstract = {Previous studies have shown that geographical slant is greatly overestimated in visual perception (Proffitt et al., 1995). These studies positioned the observer's viewpoint near to the hill. Phenomenal experience often results in more distant hills looking “even steeper” than nearer slopes. This is consistent with a computational analysis which predicts that visual information for a slope is reduced with viewing distance. We conducted a study to systematically examine this phenomenon by varying observers' viewing distance from real hills and assessing perceptual and action-based measures of geographical slant. Participants viewed grassy hills from the base of the hill or from 70 meters away. They gave three estimates of the slant of the hill: a verbal judgment in degrees, a visual matching judgment, and a haptic adjustment of a tilting board with their unseen hand. The results indicated an increase in verbal, visual, and haptic judgments for the far versus near distance. These findings differ from previous studies that have typically found accurate haptic responses despite manipulations that have led to increased overestimation in verbal and visual responses. In the present study, manipulating viewing distance from the hill influenced both the available information for slant and whether a hill may be directly acted upon.
The consistent increase across the three estimates suggests that when a hill falls outside of one's "action space", motoric and perceptual responses may be informed by the same perceptual representation. Additional manipulations of farther viewing distances and availability of visual cues are needed to more fully account for this phenomenon.}, web_url = {http://www.journalofvision.org/4/8/374/}, event_name = {Fourth Annual Meeting of the Vision Sciences Society (VSS 2004)}, event_place = {Sarasota, FL, USA}, state = {published}, DOI = {10.1167/4.8.374}, author = {Creem-Regehr SH; Mohler B{mohler}{Department Human Perception, Cognition and Action}; Thompson WB} } @Poster{ 4708, title = {Perceptual-Motor Recalibration on a Virtual Reality Treadmill}, journal = {Journal of Vision}, year = {2004}, month = {8}, volume = {4}, number = {8}, pages = {794}, abstract = {We have demonstrated that changes in the perceptual-motor calibration of human locomotion in a real environment can be caused by manipulations of a treadmill-based virtual environment. The experiment was patterned after the real-world study by Rieser, Pick, Ashmead and Garing (1995, JEP:HPP), using a computer-controlled treadmill with a 6′ by 8′ walking surface, surrounded by three 8′ by 8′ projection screens oriented to provide an approximately 180 degree horizontal field of view. In a pre-test, subjects walked without vision to previously seen targets on the floor at distances of 6m, 8m, and 10m. They then walked on the treadmill at the rate of 1.0m/s in one of three conditions: visually faster (2.0×) than the walking rate, visually slower (0.5×) and matched visual and biomechanical speeds. In a post-test, subjects performed the same visually directed walking task as in the pre-test. When exposed to the visually faster condition, subjects undershot the distances in the post-test trials by an average of 5% relative to the pre-test. Given the visually slower condition, they overshot the distances in the post-test trials by an average of 9%. In the visually same condition, subjects overshot by an average of 3%. These results show that a mismatch between visual flow of a virtual environment and biomechanical walking on a treadmill will recalibrate visually directed locomotion in the real world. The effects are similar to those found using real-world visual flow and treadmill walking. As a result, we can now probe aspects of perceptual-motor calibration that have proven to be difficult to investigate outside of a virtual environment.
Perceptual-motor recalibration may also provide a methodology for investigating perception of both speed and distance in virtual environments, since it remains an open question whether or not speed perception when viewing computer graphics is subject to the same compression that affects distance judgments.}, web_url = {http://www.journalofvision.org/4/8/794/}, event_name = {Fourth Annual Meeting of the Vision Sciences Society (VSS 2004)}, event_place = {Sarasota, FL, USA}, state = {published}, DOI = {10.1167/4.8.794}, author = {Mohler B{mohler}{Department Human Perception, Cognition and Action}; Thompson WB; Creem-Regehr SH; Willemsen P; Rieser JJ; Scholes J} } @Thesis{ 4693, title = {The effect of feedback within a virtual environment on human distance perception and adaptation}, year = {2007}, month = {1}, state = {published}, type = {PhD}, author = {Mohler B{mohler}{Department Human Perception, Cognition and Action}} } @Miscellaneous{ Mohler2017_2, title = {Studying Anorexia Nervosa with VR Self-Avatars: Is Body Image a Matter of Perception or Attitude?}, journal = {Voices of VR Podcast}, year = {2017}, month = {9}, day = {13}, number = {576}, abstract = {Do patients with anorexia nervosa suffer from body image distortion due to how they perceive their body, or is it due to attitudinal beliefs? Betty Mohler has been using VR technologies to study whether body representation is more perceptual or conceptual. She captures a 3D body scan of patients and then uses algorithms to alter the body mass index of a virtual self-avatar within a range of plus or minus 20%. Patients then estimated their existing and desired body using a virtual mirror screen which tracked movements in real-time and showed realistic weight manipulations of photo-realistic virtual avatars. Mohler’s results challenge the existing assumption that patients with anorexia nervosa have visual distortions of their body, suggesting instead that body image distortion may be driven more by attitudinal factors, where patients consider underweight bodies more desirable and attractive. Mohler works at the Space & Body Perception Group at the Max Planck Institute for Biological Cybernetics. She collaborates with philosopher of neuroscience Dr. Hong Yu Wong to research foundational questions about self-perception, such as: Who am I? Where am I? Where is the origin of my self? Where is the frame of reference? What is the essence of me? How do we know that there’s an external world? What does it mean to have a shared self where multiple people share the same body experience? What does it mean to have a body? How big is my body? Is it possible to be at multiple locations at once while in VR?
I interviewed Mohler for the third time at the IEEE VR conference in Los Angeles this past March, exploring all of these provocative questions (see my previous interviews on the uncanny valley and avatar stylization).}, web_url = {http://voicesofvr.com/576-studying-anorexia-nervosa-with-vr-self-avatars-is-body-image-a-matter-of-perception-or-attitude/}, state = {published}, author = {Mohler BJ{mohler}} } @Miscellaneous{ Mohler2016_2, title = {Automated Avatar Stylization to Counter the Uncanny Valley}, journal = {Voices of VR Podcast}, year = {2016}, month = {6}, day = {1}, number = {372}, web_url = {http://voicesofvr.com/372-automated-avatar-stylization-to-counter-the-uncanny-valley/}, state = {published}, author = {Mohler B{mohler}} } @Conference{ Mohler2017_3, title = {Virtual reality technology as a tool for sports science}, year = {2017}, month = {9}, day = {8}, abstract = {Technologies associated with virtual reality, i.e. sensory display technology, motion capture, eye-tracking and computer vision techniques for abstracting information from the world, have proven to be useful for training, assessing and evaluating sports performance. In the past five years, virtual reality has gone from being costly and under development (typically greater than 10K Euro) to mostly commodity prices. In this talk I will present some of the most exciting research findings in sports science that have used virtual reality technology, some examples of how virtual reality can control stimuli so as to better understand human motor control (i.e. Streuber et al. 2012 and von Lassberg et al.) and a future vision that virtual reality technology can help in several areas of sports science, i.e. health across the lifespan, sports training, and sports evaluation.}, web_url = {https://www.mmsp.uni-konstanz.de/iacss2017/program/}, event_name = {11th International Symposium on Computer Science in Sport (IACSS 2017)}, event_place = {Konstanz, Germany}, state = {published}, author = {Mohler B{mohler}} } @Conference{ MolbertTSBKZG2017, title = {Investigating body image disturbance in patients with anorexia nervosa using new biometric figure rating scales}, journal = {Journal of Psychosomatic Research}, year = {2017}, month = {7}, day = {1}, volume = {97}, pages = {161–162}, abstract = {Aims: Body image disturbance is a core symptom of anorexia nervosa (AN), and it is often assessed using Figure Rating Scales (FRS). Typically, FRS consist of a series of body drawings, and participants are asked to pick the body that corresponds best to their current and their desired body. So far, hardly any FRS is based on biometric data. Here, we use two new biometric FRS to investigate whether the presented weight spectrum influences a) accuracy in identifying the current weight and b) the desired weight in women with AN and controls. Method: Based on a statistical body model of human body shape and pose (Anguelov et al., 2005) and body scans of 2094 women from the CAESAR data set (Robinette et al., 1999), we generated biometric average bodies of women with predefined Body Mass Index (kg/m2, BMI). For the FRS 14-32 we used nine bodies with a BMI of 13.8 to 32.3 and for the FRS 18-42 we used nine bodies with a BMI of 18 to 42. We administered the scales along with questionnaires assessing height, weight, body dissatisfaction, habits of social comparison and eating disorder symptoms to n=104 women from the normal population (BMI=23.90, SD=6.06) and n=24 women with anorexia nervosa (BMI=15.07, SD=1.62).
n=61 women from the normal population and n=18 women with AN completed both FRS. Results: In the FRS 18-42, both groups were accurate in picking the body that corresponded best to their current weight (average offset in weight steps: Controls M=0.12, SD=1.05; AN M=0.33, SD=0.97; F(1,120)=0.67, n.s.). In the FRS 14-32, women with AN were still accurate, while controls significantly underestimated their size by about one step (Controls: M=-1.18, SD=0.97; AN M=0.10, SD=0.89; F(1,75)=27.32, p<.001). In both FRS, controls desired a body that was thinner than their actual body (FRS 18-42 M=-1.33, SD=1.72; FRS 14-32 M=-1.97, SD=1.23) and women with AN desired a body close to their actual weight (FRS 18-42 M=0.61, SD=0.61; FRS 14-32 M=0.36, SD=1.29). In the FRS 14-32, participants generally wanted a thinner body than in the FRS 18-42 (F(1)=23.54, p<.001). Conclusions: Our results suggest that the range of FRS can influence a) accuracy in identifying one's weight and b) the desired weight. Different strategies, such as "comparing body features" versus "placing oneself in a range", could account for these differences. When interpreting FRS, the provided range should always be taken into account.}, web_url = {http://www.jpsychores.com/article/S0022-3999(17)30661-X/pdf}, event_name = {5th Annual Scientific Conference of the European Association of Psychosomatic Medicine (EAPM 2017)}, event_place = {Barcelona, Spain}, state = {published}, DOI = {10.1016/j.jpsychores.2017.03.270}, author = {M\"olbert S{smoelbert}; Thaler A{athaler}; Streuber S{stst}; Black M{black}; Karnath HO; Zipfel S; Mohler B{mohler}; Giel K} } @Conference{ MolbertTMSBKZG2017, title = {Assessing body image disturbance in patients with anorexia nervosa using biometric self-avatars in virtual reality: attitudinal components rather than visual body size estimation are distorted}, journal = {Journal of Psychosomatic Research}, year = {2017}, month = {6}, day = {29}, volume = {97}, pages = {162}, abstract = {Aims: Anorexia nervosa (AN) is a serious eating disorder that is accompanied by underweight and high rates of psychological and physical comorbidity. Body image disturbance is a core symptom of AN, but as yet distinctive features of this disturbance are unknown. This study uses individual 3D-avatars in virtual reality to investigate the following questions: (1) Do women with AN differ from controls in how accurately they perceive their body weight and (2) in what body weight they desire? Method: We investigated n=24 women with AN (body mass index (kg/m2, BMI) M=15.17, SD=1.47) and n=24 healthy controls (BMI M=22.07, SD=1.85). Based on a 3D body scan, we created individual avatars for each participant. Each avatar was manipulated to represent +/- 5%, 10%, 15% and 20% of the participant's weight. Avatars were presented on a stereoscopic life-size screen. Using a 1-Alternative Forced Choice (1AFC) task and a Method of Adjustment (MoA) task, participants were asked to identify/adjust their correct body weight and their desired weight. Additionally, eating pathology, body dissatisfaction and self-esteem were assessed. In a control experiment, we repeated all tasks with an avatar that had the participant's body shape, but another person's look. Results: Women with AN and controls underestimated their current weight, with a trend that women with AN underestimated even more than controls (1AFC: AN M=-7.38%, SD=4.71; Con M=-3.80%, SD=5.02; F(1,45)=6.35, p<.05; MoA: AN M=-5.94%, SD=5.81; Con M=-3.19%, SD=4.89, F(1,45)=3.09, p=.086).
The discrepancy between desired and actual body weight suggested that both groups wanted to lose weight, and, in percent of their own body weight, controls even more so (AN M=-2.11%, SD=8.12; Con M=-9.08%, SD=6.13, F(1,45)=11.10, p<.01). Of note, the average desired body of the control group was still of normal weight, while the average desired body of women with AN had a BMI of 14.67, which would correspond to extreme AN. Correlation analyses revealed that desired body size, but not accuracy of body size estimation, was associated with eating disorder symptoms. The control experiment generally yielded the same result pattern. Conclusions: Our results contradict the widespread assumption that patients with AN overestimate their body size. Rather, they illustrate how strongly these patients prefer extremely thin bodies. According to our observations, clinical interventions should aim at helping patients with AN to change their desired weight.}, web_url = {http://www.jpsychores.com/article/S0022-3999(17)30662-1/pdf}, event_name = {5th Annual Scientific Conference of the European Association of Psychosomatic Medicine (EAPM 2017)}, event_place = {Barcelona, Spain}, state = {published}, DOI = {10.1016/j.jpsychores.2017.03.271}, author = {M\"olbert S{smoelbert}; Thaler A{athaler}; Mohler B{mohler}; Streuber S{stst}; Black M{black}; Karnath H-O; Zipfel S; Giel K} } @Conference{ Mohler2017, title = {Body Perception in Immersive Virtual Reality: The importance of personalized avatars for studying human perception and action}, year = {2017}, month = {3}, day = {30}, abstract = {Body perception has been shown to directly influence perception and action, and to be highly adaptive to new multi-sensory experiences. As has been shown in many first-person-perspective virtual reality studies, humans adapt to differently sized bodies and body parts, and attitudes are also influenced by the body we experience.
I will focus on virtual reality, personalized self-avatars, body ownership and statistical models of bodies as useful mechanisms and tools for investigating body perception, action and attitudes.}, web_url = {http://www.krino.ch/BettyMohler.html}, event_name = {Philosophische Gesellschaft Bern: Krino}, event_place = {Bern, Switzerland}, state = {published}, author = {Mohler B{mohler}} } @Conference{ MohlerBSI2017, title = {Message from the Program Chairs}, year = {2017}, month = {3}, day = {20}, abstract = {We are pleased to present the technical papers for the 2017 IEEE Virtual Reality Conference (IEEE VR 2017), held March 18–22, 2017 in Los Angeles, California.}, web_url = {https://www.conference-publishing.com/program/VR17/APP/talk-vr17foreword-fm004-p.html}, event_name = {IEEE Virtual Reality (VR 2017)}, event_place = {Los Angeles, CA, USA}, state = {published}, DOI = {10.1109/VR.2017.7892221}, author = {Mohler B{mohler}; Babu SV; Steinicke F; Interrante V} } @Conference{ Mohler2016_5, title = {Virtual Reality and Embodiment}, year = {2016}, month = {12}, day = {6}, web_url = {https://www.medizin.uni-tuebingen.de/uktmedia/Forschung/PDF_Archiv/Brosch%C3%BCre+Homepage.pdf}, event_name = {Workshop Interoception, Emotion, and Embodiment}, event_place = {Tübingen, Germany}, state = {published}, author = {Mohler B{mohler}} } @Conference{ GeussCM2016, title = {Judging Affordances From Other Viewpoints: A Role of Perspective Taking?}, journal = {Abstracts of the Psychonomic Society}, year = {2016}, month = {11}, day = {19}, volume = {21}, pages = {56-57}, abstract = {Perspective taking and judging affordances share similar functional goals when determining whether an action is possible from a location dislocated from one's current viewpoint. We tested the relationship between the two by measuring reaching affordances made from imagined locations around a table. We manipulated imagined self-location, target distance, and presence and length of an avatar arm, using an immersive virtual environment. First, in conditions without an avatar arm, we aimed to establish a baseline for reaching affordance judgments made from other perspectives, and include a novel assessment of response time for affordance judgments. Second, by manipulating visual arm length, we asked whether a change in body capabilities would influence affordance judgments from perspectives other than one's own, suggesting a role for embodied perspective taking.
Initial results suggest that reaching affordances were overestimated more from one's physical location compared to imagined locations and that response time varied with imagined location and distance of target.}, web_url = {http://c.ymcdn.com/sites/www.psychonomic.org/resource/resmgr/annual_meeting/2016_meeting/2016-PS-Abstract-Book.pdf}, event_name = {57th Annual Meeting of the Psychonomic Society}, event_place = {Boston, MA, USA}, state = {published}, author = {Geuss MN{mgeuss}; Creem-Regehr SH; Mohler BJ{mohler}} } @Conference{ Mohler2016_4, title = {Psychophysics of body size perception using biometric based self-avatars}, year = {2016}, month = {11}, day = {8}, web_url = {http://neuro-marseille.org/wp-content/uploads/2016/10/new-perspectives-on-embodiment-and-self-location.pdf}, event_name = {Workshop New Perspectives on Embodiment and Self-Location}, event_place = {Marseille, France}, state = {published}, author = {Mohler B{mohler}} } @Conference{ Mohler2016_6, title = {The Importance of Avatars for Body Perception}, year = {2016}, month = {11}, web_url = {https://www.eurovr-association.org/conference2016/program/keynotes/14-betty}, event_name = {EuroVR Conference 2016}, event_place = {Athens, Greece}, state = {published}, author = {Mohler B{mohler}} } @Conference{ Mohler2016_3, title = {Importance of Avatars for Virtual Reality}, year = {2016}, month = {9}, day = {30}, web_url = {http://www.cin.uni-tuebingen.de/news-events/browse-all-events/detail/view/338/page/1/conference-international-symposium-new-perspectives-in-integrative-neuroscience-day-1.html}, event_name = {International Symposium on "New Perspectives in Integrative Neuroscience"}, event_place = {Tübingen, Germany}, state = {published}, author = {Mohler B{mohler}} } @Conference{ TrojeBMMB2016, title = {People perception: Attractiveness from shape and motion}, journal = {Journal of Vision}, year = {2016}, month = {9}, volume = {16}, number = {12}, pages = {393}, abstract = {While there is plenty of work on facial attractiveness, little is known about how the rest of the body determines the perception of another person. We were particularly interested in how the shape of the body and the way it moves contribute to attractiveness. Observers (20 male and 20 female) rated the attractiveness of 50 men and 50 women from the BML database, each displayed in one of three ways in a 3D immersive virtual reality: (a) static bodies reconstructed from the motion capture data by means of MoSh (Loper et al. 2014, SIGGRAPH Asia) displayed as detailed 3D shapes; (b) walking stick-figures (Troje 2002, JOV); (c) the same bodies as above, but animated with the corresponding walking movements. Correlations between all 12 sets of ratings (2 participant sex x 2 walker sex x 3 display types) reveal three different factors that contribute to the perception of attractiveness. The first factor is sexual dimorphism and applies to female attractiveness assigned to stick figures and moving meshes. The more feminine a woman, the more attractive she is rated. The second is characterized by increased vertical movement which makes the attractive walker appear bouncy, confident, and vigorous. It dominates male attractiveness assigned to stick-figures and moving meshes. The third factor is characterized by slim, tall body shape (attractive) as compared to stout and wider shapes and applies to ratings of static body shapes of both male and female walkers. Male and female observers agree in all cases.
The way we move affects our appeal to others as much as the appearance of the static body does. While sexual dimorphism dominates female attractiveness, it does not play much of a role for male attractiveness, neither in the shape nor in the motion domain.}, web_url = {http://jov.arvojournals.org/article.aspx?articleid=2550373}, event_name = {16th Annual Meeting of the Vision Sciences Society (VSS 2016)}, event_place = {St. Pete Beach, FL, USA}, state = {published}, DOI = {10.1167/16.12.393}, author = {Troje NF{niko}; Bieg A{abieg}; Mahmood N{nmahmood}; Mohler BJ{mohler}; Black MJ{black}} } @Conference{ Mohler2016, title = {Perception of Space and Bodies}, year = {2016}, month = {3}, day = {23}, abstract = {The notion of a virtual reality (VR) that is indistinguishable from the real world has been addressed repeatedly in science fiction art, literature and film. Plato’s Allegory of the Cave from the ancient world and several science fiction movies from the modern era, like “The Matrix”, “Surrogates”, “Avatar”, or “World on a Wire”, are only some prominent examples which play with this perceptual ambiguity and constantly question whether our perceptions of reality are real or not. We explore perceptually-inspired and (super-) natural forms of interaction to seamlessly couple the space where the flat 2D digital world meets the three dimensions we live in. VR can be used as a tool to study human perception. Specifically, empirical results about human perception, action and cognition can be used to improve VR technology and software. Many scientists are using VR to create ecologically valid and immersive sensory stimuli in a controlled way that would not be possible in the real world. More specifically, VR technology enables us to specifically manipulate the visual body, the contents of the virtual world, and the sensory stimulus (visual, vestibular, kinesthetic, tactile, and auditory) while performing or viewing an action. This panel will focus on several different areas where perception research and VR technology have come together to improve the state of the art and advance our scientific knowledge. Specifically, in recent years a large amount of research has been performed in the areas of locomotion, space (specifically depth), body, visual-motor and interaction perception. The panelists will briefly present their multi-disciplinary results and discuss what factors lead to successful multi-disciplinary research.}, web_url = {http://ieeevr.org/2016/program/panels/}, event_name = {IEEE Virtual Reality (VR 2016)}, event_place = {Greenville, SC, USA}, state = {published}, author = {Mohler BJ{mohler}} } @Conference{ MolbertTMSBKZG2016, title = {Untersuchung der Körperbildstörung bei Anorexia Nervosa mithilfe biometrischer Avatare in virtueller Realität}, year = {2016}, month = {3}, day = {18}, pages = {103}, abstract = {Background: Body image disturbance is a core symptom of anorexia nervosa (AN). It is considered an indicator of poor prognosis, is difficult to treat, and often persists even after weight gain. This study uses individual 3D avatars to examine the following research questions: (1) Do AN patients represent body-related information differently in general, or is the body image disturbance purely self-related? (2) Is the body image disturbance characterized more perceptually or more by dysfunctional evaluations? Methods: We examine N=20 patients with AN, N=20 remitted AN patients and N=20 female control participants.
For each participant, an individual avatar with 9 different BMI levels is created on the basis of a 3D body scan, namely the current BMI and +/- 5%, 10%, 15% and 20% BMI. To determine the role of self-reference, a second avatar series with the appearance of another person is created based on the participant's figure. The avatars are presented life-size and in 3D in a virtual reality environment. The experiment follows a 2x2 mixed design with the factors group (AN versus control) and avatar series (own versus another person's appearance), using two different task formats: In the 2-Alternative Forced Choice task, the participant sees each avatar 20 times for 2 seconds and must then decide whether it was her own (i.e., the correct) avatar or a manipulated one. In the Method of Adjustment task, the participant is asked to adjust each of the avatars so that it corresponds to her current (i.e., the correct) body, and additionally so that it corresponds to her ideal body. In addition, self-esteem, body dissatisfaction and eating disorder pathology are assessed in detail. Results: First results from N=12 AN patients show, for the avatars with their own appearance, a clear tendency of the patients to identify or adjust thinner avatars than their own. For avatars with the appearance of another person, by contrast, the AN patients were largely accurate. The N=3 control participants show a similar, but markedly less pronounced, pattern. Discussion: Our preliminary results indicate that the body image disturbance in AN patients is self-related and characterized above all by evaluation, and is not based on a generally different perception or processing of bodies.}, web_url = {http://react-profile.org/ebook/Psycho2016/Abstractbook/files/assets/common/downloads/02_Psycho%202016_Abstractbuch.pdf}, event_name = {24. Jahrestagung der Deutschen Gesellschaft für Psychosomatische Medizin und Ärztliche Psychotherapie (DGPM 2016), 67.
Tagung des Deutschen Kollegiums für Psychosomatische Medizin (DKPM 2016): Beziehung und Gesundheit}, event_place = {Potsdam, Germany}, state = {published}, author = {M\"olbert SC{smoelbert}; Thaler A{athaler}; Mohler B{mohler}; Streuber S{stst}; Black M{black}; Karnath H-O; Zipfel S; Giel KE} } @Conference{ Mohler2015_2, title = {Space and Body Perception}, year = {2015}, month = {9}, day = {23}, web_url = {http://www.cin.uni-tuebingen.de/fileadmin/content/05_News_%26_Events/Conferences/Conference_150923_CIN_JRG_Selection_Symposium.pdf}, event_name = {CIN JRG Selection Symposium}, event_place = {Tübingen, Germany}, state = {published}, author = {Mohler B{mohler}} } @Conference{ Mohler2015_3, title = {Virtual Reality and Spatial/Body Perception}, year = {2015}, month = {5}, day = {3}, web_url = {http://gvents.de/bremen-tedxjacobsuniversity-2015/283052}, event_name = {TEDx Jacobs University}, event_place = {Bremen, Germany}, state = {published}, author = {Mohler B{mohler}} } @Conference{ Mohler2015, title = {Virtual Reality Improving Passenger Comfort by Changing the Perception of Self and Space}, year = {2015}, month = {4}, day = {13}, web_url = {http://www.passengerexperienceconference.com/About/overviewof2015/Presentations/}, event_name = {Passenger Experience Conference (PEX 2015)}, event_place = {Hamburg, Germany}, state = {published}, author = {Mohler B{mohler}} } @Conference{ Breidt2015, title = {Social Interactions in Virtual Reality: Challenges and Potential}, year = {2015}, month = {3}, day = {25}, abstract = {This panel will discuss the current technical challenges of social interaction in VR as well as the potential for using virtual reality as a social interface. We will consider face-to-face interactions, representations of full-body avatars, and multi-user interaction. We will focus on discussing technical challenges, including tracking and animation, which currently limit social interactions in VR. The immense sensitivity of human perception to biological motion, whether of faces or of the full body, drives the high requirements for any kind of animation in VR. In recent years, a lot of progress has been made in the field of facial animation and performance capture, mostly for use in feature animation or high-end game production, but little has made its way into the field of Virtual Reality so far. If the real-time requirements could be met without sacrificing too much quality, VR research could strongly benefit from these advances in order to develop compelling multi-user social interactions. The panelists are researchers leading the efforts to design, implement, and facilitate the next generation of VR social interactions. Each panelist will give a brief 5-10 minute presentation on the most pressing challenges for enabling VR social interaction from the perspective of their area of expertise. Further, we will present successful applications that already use virtual reality to enable social interaction, as well as the innovations needed for these applications to reach a larger audience.
The moderators will then start a discussion, followed by questions from the audience.}, web_url = {http://ieeevr.org/2015/?q=node/31#Panel1_Social}, event_name = {IEEE Virtual Reality (VR 2015)}, event_place = {Arles, France}, state = {published}, author = {Trutoiu L{auract}; Breidt M{mbreidt}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}; Steed A} } @Conference{ Mohler2014_2, title = {Perception research using immersive virtual reality technology}, year = {2014}, month = {7}, day = {1}, abstract = {Using immersive virtual reality (VR), we manipulate the visual body, the contents of the virtual world, and the sensory stimulus while an action is performed or viewed. State-of-the-art VR lets us manipulate these features in real time through fast capture and rendering technology. My primary focus is space and body perception. Space perception is the ability to experience the world in three dimensions and the distances to and between objects in the world. Body perception is the experience we have of our physical selves (and parts of ourselves, e.g. hands, legs, torso). In our research group we focus on the perception of the size, shape and form of our surrounding world and our bodies. Our results show that the body (both physical and visual) is important for perceiving distances and sizes in the world and of the body. Further, people are able to communicate more effectively if body gestures are available to both telepresence partners (avatar-mediated communication). Finally, we show that upper-body language alone is important for the recognition of emotions in natural narrative scenarios (story-telling).}, web_url = {http://crossworlds.info/conference/program/}, event_name = {CrossWorlds 2014: Theory, Development & Evaluation of Social Technology}, event_place = {Chemnitz, Germany}, state = {published}, author = {Mohler B{mohler}} } @Conference{ Mohler2014, title = {Comfort and perception of space}, year = {2014}, month = {4}, day = {1}, abstract = {The Space & Body Perception research group at the Max Planck Institute for Biological Cybernetics investigates whether we can change a person’s perception of room size and provide the illusion of a more spacious cabin by altering visual cues, even in the presence of strong motion cues such as turbulence and in a confined space.}, web_url = {http://ieeevr.org/2014/schedule.html}, event_name = {IEEE Virtual Reality (VR 2014)}, event_place = {Minneapolis, MN, USA}, state = {published}, DOI = {10.1109/VR.2014.6802115}, author = {Mohler B{mohler}} } @Conference{ Mohler2013, title = {Perception of indoor and outdoor virtual spaces}, year = {2013}, month = {12}, day = {12}, web_url = {http://jvrc2013.sciencesconf.org/}, event_name = {5th Joint Virtual Reality Conference (JVRC 2013)}, event_place = {Paris, France}, state = {published}, author = {Mohler B{mohler}{Department Human Perception, Cognition and Action}} } @Conference{ Mohler2013_3, title = {Avatars in Virtual Reality}, year = {2013}, month = {6}, volume = {13241}, pages = {61-62}, abstract = {Avatars are an increasingly popular research topic in the field of virtual reality. The first 20-30 minutes of our discussion were spent establishing exactly what participants meant by “Avatars”, and it was clear that our definitions of and needs for virtual humans fell into several categories.
Avatars are often defined as digital models of people that either look or behave like the users they represent (see [1]). However, other terms like virtual humans (virtual characters that try to represent a human with as high fidelity as possible) or social agents (virtual characters that fulfill a certain purpose through artificial intelligence) are also often referred to as avatars. Avatars can be achieved in multiple ways, e.g. video-based capture [3], pre-made avatars experienced as the user’s own body through first-person perspective and visual-motor or visual-tactile stimulation (e.g. [2]), and physical projections of video-captured data [4]. These are just a few of the many manifestations of avatars in virtual reality. In order to achieve high-fidelity virtual agents that act in a human way, many problems need to be solved by a multi-disciplinary research group. Virtual social agents must be able to move like humans, hold casual conversations, appear intelligent, be interactive, be both reactive and proactive (specific to the user), be empathetic, perform certain functions, follow basic rules of proxemics, and give and receive sensory feedback (visual, tactile, auditory). Some of the most promising applications for avatars and social agents in virtual reality are: telepresence, ergonomics/simulation, training, teaching and education, medicine and health, basic science (understanding human behavior, see [5]), and of course gaming and entertainment. During the discussion we formed three breakout groups that tried to define grand-challenge examples for avatar research. One group discussed the challenges involved in remotely caring for an elderly parent or remotely putting one’s child to bed (as a second parent). These challenges involve face-to-face communication, physical interaction (to comfort and support, to help with household tasks), observing and monitoring signs of mental and physical health, the believable presence of the remote parent to a child (visual, voice, size), and the ability to embody a remote avatar. Another group discussed a scenario for avatars in the health professions and in the education of medical professionals, specifically where discourse is limited. Important to these scenarios is the ability to build trust, convey empathy and have confidence in the sometimes uncertain or emotional information being shared. Finally, another group considered the challenge of having a portable self-representation that could be brought into whichever virtual reality application one is using. The challenges here are system challenges: establishing a virtual reality standard with regard to models and animation methods, and addressing ethical issues with regard to data security. Specifically, this group considered how the data for individual avatars might be collected, e.g. cameras only, motion capture suits, or physiological measures such as heart rate, skin conductance and brain waves.
Specifically, the question was raised: Which measures help increase fidelity, and which go ethically too far?}, web_url = {http://www.dagstuhl.de/de/programm/kalender/semhp/?semnr=13241}, event_name = {Dagstuhl Seminar 13241: Virtual Realities}, event_place = {Dagstuhl, Germany}, state = {published}, author = {Mohler B{mohler}{Department Human Perception, Cognition and Action}} } @Conference{ Mohler2013_2, title = {Perception & Action in Virtual Environments}, year = {2013}, month = {6}, pages = {52-53}, abstract = {In the Perception and Action in Virtual Environments research group, our aim is to investigate human behavior, perception and cognition using ecologically valid and immersive virtual environments. Virtual reality (VR) equipment enables our scientists to provide sensory stimuli in a controlled virtual world and to manipulate or alter sensory input in ways that would not be possible in the real world. More specifically, VR technology enables us to manipulate the visual body, the contents of the virtual world, and the sensory stimulus (visual, vestibular, kinesthetic, tactile, and auditory) while an action is performed or viewed. Our group focuses on several different areas, all of which involve measuring human performance in complex everyday tasks, e.g. spatial judgments, walking, driving, communicating and spatial navigation. We investigate the impact of having an animated self-avatar on spatial perception, on the feeling of embodiment or agency, and on the ability of two people to communicate effectively. Our goal is to use state-of-the-art virtual reality technology to better understand how humans perceive sensory information and act in the surrounding world. We use HMDs, large-screen displays, motion simulators and sophisticated treadmills in combination with real-time rendering and control software and tools in order to immerse our participants in a virtual world. In this talk I will show videos of our technical setups and explain how I became interested in spatial perception in virtual reality. See specifically the following videos, which are available online: http://www.youtube.com/user/MPIVideosProject.}, web_url = {http://www.dagstuhl.de/de/programm/kalender/semhp/?semnr=13241}, event_name = {Dagstuhl Seminar 13241: Virtual Realities}, event_place = {Dagstuhl, Germany}, state = {published}, author = {Mohler B{mohler}{Department Human Perception, Cognition and Action}} } @Conference{ VolkovaMB2012, title = {Motion Capture of Emotional Body Language in Narrative Scenarios}, year = {2012}, month = {11}, volume = {13}, pages = {9}, abstract = {We interact with the world we live in by moving in it. The interaction is versatile and includes communication through speech and gestures, which serve as media to transmit ideas and emotions. A narrator, be it a professional actor on the stage or a friend telling an anecdote, expresses her ideas (the content) and feelings (the emotional colouring) through the choice of words and syntactical structures, her prosody, facial expressions and body language. Our present focus is on emotional body language, which became a field of intensive research several decades ago. Before psychophysical experiments or trajectory analysis can take place, a set of mocap (motion capture) data has to be accumulated. This can be done with different equipment setups, and by now human motion can be captured fairly precisely at a high frame rate.
One of the major decisions for researchers, however, is the choice of scenarios according to which the actors are to perform. This question is especially tricky when we deal with emotions, since the problems of sincerity and naturalness come into play. There are several ways to induce emotions and moods in people, but for motion capture the so-called imagination technique has been used most frequently. The actors are asked to evoke an emotion in themselves by recalling a past event. The main drawbacks of this technique in mocap are the following: (1) it is still impossible to ensure that the emotions are sincere and the motion is natural rather than artificial or exaggerated; (2) the emotional categories often succeed each other rapidly and in random fashion; (3) the emotional scenarios can be very abstract and taken out of context. We have developed an experimental setup in which emotional body language can be captured in a maximally natural yet controlled manner. The participants are asked to imagine they are narrating a fairy-tale to children. They perform several tasks on the text before their acting is recorded. The setup allows the actors to narrate the story at their own pace and move freely, and does not require them to learn the text by heart, yet the recorded data can be easily extracted and processed after the motion capture session. The resulting extracted data can then be analysed for various features or used in perceptual experiments.}, web_url = {http://www.neuroschool-tuebingen-nena.de/}, event_name = {13th Conference of the Junior Neuroscientists of Tübingen (NeNA 2012): Science and Education as Social Transforming Agents}, event_place = {Schramberg, Germany}, state = {published}, author = {Volkova EP{evolk}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ DobrickiMB2012_3, title = {The ownership of a virtual body induced by visuo-tactile stimulation indicates the alteration of self-boundaries}, journal = {Cognitive Processing}, year = {2012}, month = {9}, day = {7}, volume = {13}, number = {Supplement 1}, pages = {S18}, abstract = {Watching a virtual body (avatar) being stroked while one’s own body is being synchronously stroked has been shown to elicit the experience of bodily ownership over the avatar in the viewer. Previously, this has been interpreted to mean that individuals take exclusive ownership over the avatar. However, it should be considered that, due to the sensory integration of visual and tactile percepts, avatar ownership could be the result of a decreased differentiation between (visual) non-self and (tactile) self-percepts. Hence, in this case individuals would incorporate an avatar because the boundaries of what they experience as “themselves” are altered. We used a head-mounted display based setup in which participants viewed an avatar from behind within a virtual city. We stroked the participants’ bodies while they watched the avatar being synchronously stroked. Subsequently, we assessed their avatar and their spatial presence experience with a questionnaire, and then repeated the initial treatment. Finally, we rotated the participants’ perspective around their vertical axis for 1 min. During the rotation the avatar was in the same location in front of the viewer. Participants were asked to indicate when they started to experience self-motion.
They reported higher identification with the avatar and showed a later onset of visually induced self-motion perception after visuo-tactile stimulation. Overall, our results indicate that there was a decrease of differentiation between non-self and self-percepts. Hence, we propose that avatar ownership should not be understood as a “body swapping”, but as an integration of the avatar within an individual’s multimodal self-boundaries.}, web_url = {http://link.springer.com/content/pdf/10.1007%2Fs10339-012-0510-8.pdf}, event_name = {Fifth International Conference on Spatial Cognition (ICSC 2012)}, event_place = {Roma, Italy}, state = {published}, DOI = {10.1007/s10339-012-0510-8}, author = {Dobricki M{mdobricki}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ DobrickiMB2012_2, title = {The structure of self-experience during visuo-tactile stimulation of a virtual and the physical body}, journal = {Seeing and Perceiving}, year = {2012}, month = {6}, day = {22}, volume = {25}, number = {0}, pages = {214}, abstract = {The simultaneous visuo-tactile stimulation of an individual’s body and a virtual body (avatar) is an experimental method used to investigate the mechanisms of self-experience. Studies incorporating this method found that it elicits the experience of bodily ownership over the avatar. Moreover, as part of our own research we found that it also has an effect on the experience of agency and spatial presence, as well as on the perception of self-motion, and thus on self-localization. However, it has so far not been investigated whether these effects represent distinct categories within conscious experience. We stroked the backs of 21 male participants for three minutes while they watched an avatar being synchronously stroked within a virtual city in a head-mounted display setup. Subsequently, we assessed their avatar and their spatial presence experience with 23 questionnaire items. The analysis of the responses to all items by means of nonmetric multidimensional scaling resulted in a two-dimensional map (stress=0.151) on which three distinct categories of items could be identified: a cluster (Cronbach’s alpha=.89) consisting of all presence items, a cluster (Cronbach’s alpha=.88) consisting of agency-related items, and a cluster (Cronbach’s alpha=.93) consisting of items related to body ownership as well as self-localization. The reason that spatial presence formed a distinct category could be that body ownership, self-localization and agency are not reported in relation to space. Body ownership and self-localization belonged to the same category, which we named identification phenomena.
Hence, we propose the following three higher-order categories of self-experience: identification, agency, and spatial presence.}, web_url = {http://booksandjournals.brillonline.com/content/10.1163/187847612x648413}, event_name = {13th International Multisensory Research Forum (IMRF 2012)}, event_place = {Oxford, UK}, state = {published}, DOI = {10.1163/187847612X648413}, author = {Dobricki M{mdobricki}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ LinkenaugerMB2011_2, title = {Welcome to wonderland: The apparent size of the self-avatar hands and arms influences perceived size and shape in virtual environments}, journal = {Perception}, year = {2011}, month = {9}, volume = {40}, number = {ECVP Abstract Supplement}, pages = {46}, abstract = {According to the functional approach to the perception of spatial layout, angular optic variables that indicate extents are scaled to the body and its action capabilities [cf Proffitt, 2006 Perspectives on Psychological Science 1(2) 110–122]. For example, reachable extents are perceived as a proportion of the maximum extent to which one can reach, and the apparent sizes of graspable objects are perceived as a proportion of the maximum extent that one can grasp (Linkenauger et al, 2009 Journal of Experimental Psychology: Human Perception and Performance; 2010 Psychological Science). Therefore, apparent sizes and distances should be influenced by changing scaling aspects of the body. To test this notion, we immersed participants in a full-cue virtual environment. Participants’ head, arm and hand movements were tracked and mapped onto a first-person, self-representing avatar in real time. We manipulated the participants’ visual information about their body by changing aspects of the self-avatar (hand size and arm length). Perceptual verbal and action judgments of the sizes and shapes of virtual objects (spheres and cubes) varied as a function of the hand/arm scaling factor. These findings provide support for a body-based approach to perception and highlight the impact of self-avatars’ bodily dimensions on users’ perceptions of space in virtual environments.}, web_url = {http://pec.sagepub.com/content/40/1_suppl.toc}, event_name = {34th European Conference on Visual Perception}, event_place = {Toulouse, France}, state = {published}, DOI = {10.1177/03010066110400S102}, author = {Linkenauger S{sally}{Department Human Perception, Cognition and Action}; Mohler BJ{mohler}{Department Human Perception, Cognition and Action}; B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}} } @Conference{ 6785, title = {Self-Avatars in Immersive Virtual Environments as a Tool to Investigate Embodied Perception}, year = {2010}, month = {9}, day = {30}, web_url = {http://www.bodyrep.ethz.ch/}, event_name = {Body Representation in Physical and Virtual Reality with Application to Rehabilitation}, event_place = {Monte Veritá, Switzerland}, state = {published}, author = {Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Conference{ Mohler2010, title = {Self-Avatars in Immersive Virtual Environments as a Tool to investigate Embodied Perception}, year = {2010}, month = {7}, day = {19}, abstract = {A rendering of a representation of one’s own body in a head mounted display (HMD) virtual environment (VE) is a useful tool for investigating embodied perception.
Currently, few HMD VE systems display a rendering of the user’s own body. Subjectively, this often leads to a sense of disembodiment in the VE. In a recent study, we found that experience with an avatar changed the typical pattern of distance underestimation seen in many HMD studies. Users showed an increase in distance estimations with avatar experience, especially when the avatar was animated in correspondence with their own body movements. Additionally, we investigated the impact of experience with an animated avatar on other common tasks within an HMD VE (locomotion, object interaction and social interaction). We found that pre-exposure to an animated avatar had no significant effect on these behaviors. Most recently, we investigated the impact of self-avatars on the ability of multiple users in an HMD VE to communicate. We found that, especially in 3rd-person perspective, the animation of the avatars increased the rate of communication. We believe that immersive VEs provide great potential for further investigating embodied perception.}, web_url = {http://wwwmath.uni-muenster.de/HotNews/show_artikel.php?id=2433&brettid=47}, event_name = {Westfälische Wilhelms-Universität: Fachbereich 10 Mathematik und Informatik}, event_place = {Münster, Germany}, state = {published}, author = {Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Conference{ 6667, title = {Self-Avatars in Immersive Virtual Environments as a Tool to investigate Embodied Perception}, year = {2010}, month = {6}, day = {18}, abstract = {A rendering of a representation of one’s own body in a head mounted display (HMD) virtual environment (VE) is a useful tool for investigating embodied perception. Currently, few HMD VE systems display a rendering of the user’s own body. Subjectively, this often leads to a sense of disembodiment in the VE. In a recent study, we found that experience with an avatar changed the typical pattern of distance underestimation seen in many HMD studies. Users showed an increase in distance estimations with avatar experience, especially when the avatar was animated in correspondence with their own body movements. Additionally, we investigated the impact of experience with an animated avatar on other common tasks within an HMD VE (locomotion, object interaction and social interaction). We found that pre-exposure to an animated avatar had no significant effect on these behaviors. Most recently, we investigated the impact of self-avatars on the ability of multiple users in an HMD VE to communicate. We found that, especially in 3rd-person perspective, the animation of the avatars increased the rate of communication.
We believe that immersive VEs provide great potential for further investigating embodied perception.}, web_url = {http://www.eventlab-ub.org/index.php?menu=events&FP=p&P=3}, event_name = {Event Lab: Universitat de Barcelona}, event_place = {Barcelona, Spain}, state = {published}, author = {Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Conference{ MeilingerM2010, title = {Spatial Cognition and Virtual Characters}, year = {2010}, month = {2}, day = {22}, web_url = {http://www.cin.uni-tuebingen.de/news-events/browse-all-events/detail/view/338/page/3/conference-symposium-neural-encoding-of-perception-and-action.html}, event_name = {Symposium "Neural Encoding of Perception and Action"}, event_place = {Tübingen, Germany}, state = {published}, author = {Meilinger T{meilinger}{Department Human Perception, Cognition and Action}; Mohler B{mohler}{Department Human Perception, Cognition and Action}} } @Conference{ 6124, title = {Avatars in Immersive Virtual Environments}, year = {2009}, month = {8}, day = {24}, abstract = {Few HMD-based virtual environment systems display a rendering of the user’s own body. Subjectively, this often leads to a sense of disembodiment in the virtual world. We explore the effect of being able to see one’s own body in such systems on an objective measure of the accuracy of one form of space perception. Using an action-based response measure, we found that participants who explored near space while seeing a fully-articulated and tracked visual representation of themselves subsequently made more accurate judgments of absolute egocentric distance to locations ranging from 4m to 6m away from where they were standing than did participants who saw no avatar. A non-animated avatar also improved distance judgments, but by a lesser amount. Participants who viewed either animated or static avatars positioned 3m in front of their own position made subsequent distance judgments with similar accuracy to the participants who viewed the equivalent animated or static avatar positioned at their own location. I will discuss the implications of these results for theories of embodied perception in virtual environments.}, event_name = {Brown University: Department of Cognitive & Linguistic Sciences}, event_place = {Providence, RI, USA}, state = {published}, author = {Mohler BJ{mohler}{Department Human Perception, Cognition and Action}} } @Conference{ 5578, title = {MPI Research and Avatars Walking in Virtual Reality}, year = {2008}, month = {6}, day = {2}, pages = {23}, web_url = {http://www.dagstuhl.de/en/program/calendar/semhp/?semnr=08231}, event_name = {Dagstuhl Seminar 08231: Virtual Realities}, event_place = {Dagstuhl, Germany}, state = {published}, author = {Mohler B{mohler}{Department Human Perception, Cognition and Action}} } @Conference{ 5579, title = {Human Perception and Virtual Environments}, year = {2007}, month = {7}, event_name = {Departmental Colloquium: Max Planck Institute for Biological Cybernetics}, event_place = {Tübingen, Germany}, state = {published}, author = {Mohler B{mohler}{Department Human Perception, Cognition and Action}} } @Conference{ 5582, title = {Computer graphics research at the University of Utah: What you should know about graduate school}, year = {2004}, month = {5}, event_name = {Host: Dr. Roger Webster}, state = {published}, author = {Mohler B{mohler}} } @Conference{ 5580, title = {Interactions between visual information for self-motion and locomotion}, year = {2004}, month = {5}, event_name = {Host: Dr.
Jack Loomis}, state = {published}, author = {Mohler B{mohler}} } @Conference{ 5581, title = {Interactions between visual information for self-motion and locomotion}, year = {2004}, month = {5}, event_name = {Host: Dr. Dennis Proffitt}, event_place = {Psychology Department}, state = {published}, author = {Mohler B{mohler}} }