%
% This file was created by the Typo3 extension
% sevenpack version 0.7.14
%
% --- Timezone: CEST
% Creation date: 2017-05-23
% Creation time: 12-52-08
% --- Number of references
% 18
%

@Article { SaultonBdD2017,
  title = {Cultural differences in room size perception},
  journal = {PLoS One},
  year = {2017},
  month = {4},
  volume = {12},
  number = {4},
  pages = {1-12},
  abstract = {Cultural differences in spatial perception have been little investigated, which gives rise to the impression that spatial cognitive processes might be universal. Contrary to this idea, we demonstrate cultural differences in spatial volume perception of computer-generated rooms between Germans and South Koreans. We used a psychophysical task in which participants had to judge whether a rectangular room was larger or smaller than a square room of reference. We systematically varied the room rectangularity (depth-to-width aspect ratio) and the viewpoint (middle of the short wall vs. long wall) from which the room was viewed. South Koreans were significantly less biased by room rectangularity and viewpoint than their German counterparts. These results are in line with previous notions of general cognitive processing strategies being more context dependent in East Asian societies than in Western ones. We point to the necessity of considering culturally-specific cognitive processing strategies in visual spatial cognition research.},
  department = {Department B{\"u}lthoff},
  web_url = {http://journals.plos.org/plosone/article/file?id=10.1371/journal.pone.0176115\&type=printable},
  DOI = {10.1371/journal.pone.0176115},
  EPUB = {e0176115},
  author = {Saulton, A and B{\"u}lthoff, HH and de la Rosa, S and Dodds, TJ}
}

@Article { SaultonMBD2016,
  title = {Egocentric biases in comparative volume judgments of rooms},
  journal = {Journal of Vision},
  year = {2016},
  month = {4},
  volume = {16},
  number = {6:2},
  pages = {1-16},
  abstract = {The elongation of a figure or object can induce a perceptual bias regarding its area or volume estimation. This bias is notable in Piagetian experiments in which participants tend to consider elongated cylinders to contain more liquid than shorter cylinders of equal volume. We investigated whether similar perceptual biases could be found in volume judgments of surrounding indoor spaces and whether those judgments were viewpoint dependent. Participants compared a variety of computer-generated rectangular rooms with a square room in a psychophysical task. We found that the elongation bias in figures or objects was also present in volume comparison judgments of indoor spaces. Further, the direction of the bias (larger or smaller) depended on the observer's viewpoint. Similar results were obtained from a monoscopic computer display (Experiment 1) and a stereoscopic head-mounted display with head tracking (Experiment 2). We used generalized linear mixed-effect models to model participants' volume judgments as a function of room depth and width. A good fit to the data was found when applying weight to the depth relative to the width, suggesting that participants' judgments were biased by egocentric properties of the space.
We discuss how biases in comparative volume judgments of rooms might reflect the use of simplified strategies, such as anchoring on one salient dimension of the space.},
  department = {Department B{\"u}lthoff},
  department2 = {Research Group Mohler},
  web_url = {http://jov.arvojournals.org/article.aspx?articleid=2512824\&resultClick=1},
  DOI = {10.1167/16.6.2},
  author = {Saulton, A and Mohler, B and B{\"u}lthoff, HH and Dodds, TJ}
}

@Article { SaultonDBd2015,
  title = {Objects exhibit body model like shape distortions},
  journal = {Experimental Brain Research},
  year = {2015},
  month = {5},
  volume = {233},
  number = {5},
  pages = {1471-1479},
  abstract = {Accurate knowledge about the size and shape of the body derived from somatosensation is important to locate one’s own body in space. The internal representation of these body metrics (body model) has been assessed by contrasting the distortions of participants’ body estimates across two types of tasks (localization task vs. template matching task). Here, we examined to what extent this contrast is linked to the human body. We compared participants’ shape estimates of their own hand and non-corporeal objects (rake, post-it pad, CD-box) between a localization task and a template matching task. While most items were perceived accurately in the visual template matching task, they appeared to be distorted in the localization task. All items’ distortions were characterized by a larger underestimation of length than of width. This pattern of distortion was maintained across orientations for the rake item only, suggesting that the biases measured on the rake were bound to an item-centric reference frame. This was previously assumed to be the case only for the hand. Although similar results can be found between non-corporeal items and the hand, the hand appears significantly more distorted than other items in the localization task. Therefore, we conclude that the magnitude of the distortions measured in the localization task is specific to the hand. Our results are in line with the idea that the localization task for the hand measures contributions of both an implicit body model that is not utilized in landmark localization with objects and other factors that are common to objects and the hand.},
  department = {Department B{\"u}lthoff},
  web_url = {http://link.springer.com/content/pdf/10.1007\%2Fs00221-015-4221-0.pdf},
  DOI = {10.1007/s00221-015-4221-0},
  author = {Saulton, A and Dodds, TJ and B{\"u}lthoff, HH and de la Rosa, S}
}

@Article { VolkovaMDTB2014_2,
  title = {Emotion categorization of body expressions in narrative scenarios},
  journal = {Frontiers in Psychology},
  year = {2014},
  month = {6},
  volume = {5},
  number = {623},
  pages = {1-11},
  abstract = {Humans can recognize emotions expressed through body motion with high accuracy even when the stimuli are impoverished. However, most of the research on body motion has relied on exaggerated displays of emotions. In this paper we present two experiments in which we investigated whether emotional body expressions could be recognized when they were recorded during natural narration. Our actors were free to use their entire body, face, and voice to express emotions, but our resulting visual stimuli used only the upper body motion trajectories in the form of animated stick figures. Observers were asked to perform an emotion recognition task on short motion sequences using a large and balanced set of emotions (amusement, joy, pride, relief, surprise, anger, disgust, fear, sadness, shame, and neutral).
Even with only upper body motion available, our results show recognition accuracy significantly above chance level and high consistency rates among observers. In our first experiment, which used a more classic emotion induction setup, all emotions were well recognized. In the second study, which employed narrations, four basic emotion categories (joy, anger, fear, and sadness), three non-basic emotion categories (amusement, pride, and shame), and the “neutral” category were recognized above chance. Interestingly, especially in the second experiment, observers showed a bias toward anger when recognizing the motion sequences for emotions. We discovered that similarities between motion sequences across the emotions along such properties as mean motion speed, number of peaks in the motion trajectory, and mean motion span can explain a large percentage of the variation in observers' responses. Overall, our results show that upper body motion is informative for emotion recognition in narrative scenarios.},
  department = {Department B{\"u}lthoff},
  department2 = {Research Group Mohler},
  web_url = {http://journal.frontiersin.org/Journal/10.3389/fpsyg.2014.00623/pdf},
  DOI = {10.3389/fpsyg.2014.00623},
  author = {Volkova, EP and Mohler, BJ and Dodds, TJ and Tesch, J and B{\"u}lthoff, HH}
}

@Article { HeydrichDAHBMB2013_2,
  title = {Visual capture and the experience of having two bodies: evidence from two different virtual reality techniques},
  journal = {Frontiers in Psychology},
  year = {2013},
  month = {12},
  volume = {4},
  number = {946},
  pages = {1-15},
  abstract = {In neurology and psychiatry, the detailed study of illusory own body perceptions has suggested close links between bodily processing and self-consciousness. One such illusory own body perception is heautoscopy, where patients have the sensation of being reduplicated and of existing at two or even more locations. In previous experiments, using a video head-mounted display, self-location and self-identification were manipulated by applying conflicting visuo-tactile information. Yet the experienced singularity of the self was not affected, i.e., participants did not experience having multiple bodies or selves. In two experiments presented in this paper, we investigated self-location and self-identification while participants saw two virtual bodies (video-generated in study 1 and 3D computer-generated in study 2) that were stroked either synchronously or asynchronously with their own body. In both experiments, we report that self-identification with two virtual bodies was stronger during synchronous stroking. Furthermore, in the video-generated setup with synchronous stroking, participants reported a greater feeling of having multiple bodies than in the control conditions. In study 1, but not in study 2, we report that self-location – measured by anterior-posterior drift – was significantly shifted towards the two bodies in the synchronous condition only. Self-identification with two bodies, the sensation of having multiple bodies, and the changes in self-location show that the experienced singularity of the self can be studied experimentally. We discuss our data with respect to ownership for supernumerary hands and heautoscopy.
Finally, we compare the effects of the video and 3D computer-generated head-mounted display technologies and discuss the possible benefits of using either technology to induce changes in illusory self-identification with a virtual body.},
  department = {Department B{\"u}lthoff},
  web_url = {http://www.frontiersin.org/Journal/DownloadFile.ashx?pdf=1\&FileId=21551\&articleId=66408\&Version=1\&ContentTypeId=21\&FileName=fpsyg-04-00946.pdf},
  DOI = {10.3389/fpsyg.2013.00946},
  author = {Heydrich, L and Dodds, TJ and Aspell, JE and Herbelin, B and B{\"u}lthoff, HH and Mohler, BJ and Blanke, O}
}

@Article { DoddsMB2011,
  title = {Talk to the Virtual Hands: Self-Animated Avatars Improve Communication in Head-Mounted Display Virtual Environments},
  journal = {PLoS One},
  year = {2011},
  month = {10},
  volume = {6},
  number = {10},
  pages = {1-12},
  abstract = {Background: When we talk to one another face-to-face, body gestures accompany our speech. Motion tracking technology enables us to include body gestures in avatar-mediated communication, by mapping one's movements onto one's own 3D avatar in real time, so the avatar is self-animated. We conducted two experiments to investigate (a) whether head-mounted display virtual reality is useful for researching the influence of body gestures in communication; and (b) whether body gestures are used to help in communicating the meaning of a word. Participants worked in pairs and played a communication game, where one person had to describe the meanings of words to the other. Principal Findings: In experiment 1, participants used significantly more hand gestures and successfully described significantly more words when nonverbal communication was available to both participants (i.e. both describing and guessing avatars were self-animated, compared with both avatars in a static neutral pose). Participants ‘passed’ (gave up describing) significantly more words when they were talking to a static avatar (no nonverbal feedback available). In experiment 2, participants' performance was significantly worse when they were talking to an avatar with a prerecorded listening animation, compared with an avatar animated by their partners' real movements. In both experiments participants used significantly more hand gestures when they played the game in the real world.
Conclusions: Taken together, the studies show how (a) virtual reality can be used to systematically study the influence of body gestures; (b) it is important that nonverbal communication is bidirectional (real nonverbal feedback in addition to nonverbal communication from the describing participant); and (c) there are differences in the number of body gestures that participants use with and without the head-mounted display, and we discuss possible explanations for this and ideas for future investigation.},
  url = {http://www.kyb.tuebingen.mpg.de/fileadmin/user_upload/files/publications/2011/Dodds-TalktotheVirtualHands-PLoSOne-2011.pdf},
  department = {Department B{\"u}lthoff},
  web_url = {http://www.plosone.org/article/fetchObjectAttachment.action;jsessionid=BB8DF7295C39A064E5FFC9839EC935BD.ambra01?uri=info\%3Adoi\%2F10.1371\%2Fjournal.pone.0025759\&representation=PDF},
  DOI = {10.1371/journal.pone.0025759},
  EPUB = {e25759},
  author = {Dodds, TJ and Mohler, BJ and B{\"u}lthoff, HH}
}

@Article { 6094,
  title = {Using mobile group dynamics and virtual time to improve teamwork in large-scale collaborative virtual environments},
  journal = {Computers \& Graphics},
  year = {2009},
  month = {4},
  volume = {33},
  number = {2},
  pages = {130-138},
  abstract = {Mobile group dynamics (MGDs) assist synchronous working in collaborative virtual environments (CVEs), and virtual time (VT) extends the benefits to asynchronous working. The present paper describes the implementation of MGDs (teleporting, awareness and multiple views) and VT (the utterances of 23 previous users were embedded in a CVE as conversation tags), and their evaluation using an urban planning task. Compared with previous research using the same scenario, the new MGD techniques produced substantial increases in the amount that, and distance over which, participants communicated. With VT, participants chose to listen to a quarter of the conversations of their predecessors while performing the task. The embedded VT conversations led to a reduction in the rate at which participants traveled around, but an increase in the live communication that took place. Taken together, the studies show how CVE interfaces can be improved for synchronous and asynchronous collaboration, and highlight possibilities for future research.},
  url = {http://www.kyb.tuebingen.mpg.de/fileadmin/user_upload/files/publications/computersgraphics09_6094[0].pdf},
  department = {Department B{\"u}lthoff},
  web_url = {http://www.sciencedirect.com/science?_ob=MImg\&_imagekey=B6TYG-4VG5HY5-1-10\&_cdi=5618\&_user=29041\&_pii=S0097849309000144\&_orig=search\&_coverDate=04\%2F30\%2F2009\&_sk=999669997\&view=c\&wchp=dGLbVzb-zSkzk\&md5=1da33bd23cab40845ffa464e4a4068a8\&ie=/sdarticle.pdf},
  institute = {Biologische Kybernetik},
  organization = {Max-Planck-Gesellschaft},
  language = {en},
  DOI = {10.1016/j.cag.2009.01.001},
  author = {Dodds, TJ and Ruddle, RA}
}

@Inproceedings { DoddsMdSB2011,
  title = {Embodied Interaction in Immersive Virtual Environments with Real Time Self-animated Avatars},
  year = {2011},
  month = {5},
  pages = {132-135},
  abstract = {This paper outlines our recent research that is providing users with a 3D avatar representation, and in particular focuses on studies in which the avatar is self-animated in real time. We use full body motion tracking, so when participants move their hands and feet, these movements are mapped onto the avatar.
In a recent study (Dodds et al., CASA 2010), we found that a self-animated avatar aided participants in a communication task in a head-mounted display immersive virtual environment (VE). From the perspective of communication, we discovered it was not only important for the person speaking to be self-animated, but also for the person listening. Further, we show the potential of immersive VEs for investigating embodied interaction, and highlight possibilities for future research.},
  url = {http://www.kyb.tuebingen.mpg.de/fileadmin/user_upload/files/publications/2011/CHI-2011-Dodds.pdf},
  department = {Department B{\"u}lthoff},
  web_url = {http://www.antle.iat.sfu.ca/chi2011_EmbodiedWorkshop/},
  web_url2 = {http://www.elisevandenhoven.com/publications/antle-chi11wp.pdf},
  editor = {Antle, A.N. and Marshall, P. and Van Den Hoven, E.},
  publisher = {ACM Press},
  address = {New York, NY, USA},
  event_place = {Vancouver, BC},
  event_name = {Workshop Embodied Interaction: Theory and Practice in HCI (CHI 2011)},
  author = {Dodds, TJ and Mohler, BJ and de la Rosa, S and Streuber, S and B{\"u}lthoff, HH}
}

@Inproceedings { 6541,
  title = {A Communication Task in HMD Virtual Environments: Speaker and Listener Movement Improves Communication},
  year = {2010},
  month = {6},
  pages = {1-4},
  abstract = {In this paper we present an experiment which investigates the influence of animated real-time self-avatars in immersive virtual environments on a communication task. Further, we investigate the influence of 1st- and 3rd-person perspectives and the influence of tracked speaker and listener. We find that people perform best in our communication task when both the speaker and the listener have an animated self-avatar and when the speaker is in the 3rd person. The more people move, the better they perform in the communication task. These results suggest that when two people in a virtual environment are animated, they do use gestures to communicate.},
  url = {http://www.kyb.tuebingen.mpg.de/fileadmin/user_upload/files/publications/casa_final_6541[0].pdf},
  department = {Department B{\"u}lthoff},
  web_url = {http://casa2010.inria.fr/},
  institute = {Biologische Kybernetik},
  organization = {Max-Planck-Gesellschaft},
  event_place = {Saint-Malo, France},
  event_name = {23rd Annual Conference on Computer Animation and Social Agents (CASA 2010)},
  language = {en},
  author = {Dodds, TJ and Mohler, BJ and B{\"u}lthoff, HH}
}

@Inproceedings { 6093,
  title = {Using Teleporting, Awareness and Multiple Views to Improve Teamwork in Collaborative Virtual Environments},
  year = {2008},
  month = {5},
  pages = {81-88},
  abstract = {Mobile Group Dynamics (MGDs) are a suite of techniques that help people work together in large-scale collaborative virtual environments (CVEs). The present paper describes the implementation and evaluation of three additional MGD techniques (teleporting, awareness and multiple views) which, when combined, produced a fourfold increase in the amount that participants communicated in a CVE and also significantly increased the extent to which participants communicated over extended distances in the CVE. The MGDs were evaluated in an urban planning scenario with groups of either seven (teleporting + awareness) or eight (teleporting + awareness + multiple views) participants.
The study has implications for CVE designers, because it provides quantitative and qualitative data about how teleporting, awareness and multiple views improve groupwork in CVEs.},
  url = {http://www.kyb.tuebingen.mpg.de/fileadmin/user_upload/files/publications/egve08_6093[0].pdf},
  department = {Department B{\"u}lthoff},
  web_url = {http://diglib.eg.org/handle/10.2312/EGVE.EGVE08.081-088},
  editor = {Van Liere, R. and Mohler, B.J.},
  publisher = {The Eurographics Association},
  address = {Aire-la-Ville, Switzerland},
  booktitle = {Virtual Environments 2008},
  institute = {Biologische Kybernetik},
  organization = {Max-Planck-Gesellschaft},
  event_place = {Eindhoven, Netherlands},
  event_name = {14th Eurographics Symposium on Virtual Environments (EGVE 2008)},
  language = {en},
  ISBN = {978-3-905674-06-4},
  DOI = {10.2312/EGVE/EGVE08/081-088},
  author = {Dodds, TJ and Ruddle, RA}
}

@Inproceedings { 6092,
  title = {Mobile group dynamics in large-scale collaborative virtual environments},
  year = {2008},
  month = {3},
  pages = {59-66},
  abstract = {We have developed techniques called mobile group dynamics (MGDs), which help groups of people to work together while they travel around large-scale virtual environments. MGDs explicitly showed the groups that people had formed themselves into, and helped people move around together and communicate over extended distances. The techniques were evaluated in the context of an urban planning application, by providing one batch of participants with MGDs and another with an interface based on conventional collaborative virtual environments (CVEs). Participants with MGDs spent nearly twice as much time in close proximity (within 10 m of their nearest neighbor), communicated seven times more than participants with a conventional interface, and exhibited real-world patterns of behavior such as staying together over an extended period of time and regrouping after periods of separation. The study has implications for CVE designers, because it shows how MGDs improve groupwork in CVEs.},
  url = {http://www.kyb.tuebingen.mpg.de/fileadmin/user_upload/files/publications/ieeevr08_6092[0].pdf},
  web_url = {http://conferences.computer.org/vr/2008/prelim/},
  editor = {Lin, M.C. and Steed, A. and Cruz-Neira, C.},
  publisher = {IEEE},
  address = {Piscataway, NJ, USA},
  institute = {Biologische Kybernetik},
  organization = {Max-Planck-Gesellschaft},
  event_place = {Reno, NV, USA},
  event_name = {IEEE Virtual Reality Conference (VR 2008)},
  language = {en},
  ISBN = {978-1-4244-1971-5},
  DOI = {10.1109/VR.2008.4480751},
  author = {Dodds, TJ and Ruddle, RA}
}

@Poster { SaultonDB2015,
  title = {Holistic Versus Analytic Perception of Indoor Spaces: Korean and German Cultural Differences in Comparative Judgments of Room Size},
  year = {2015},
  month = {3},
  day = {13},
  pages = {18},
  abstract = {We demonstrate that German and South Korean cultures perceive the size of surrounding indoor spaces differently.
While Koreans seem to attend to all aspects/dimensions of rooms when comparing their size, Germans anchor on one single dimension of the space (egocentric depth), resulting in biases in room size perception.},
  department = {Department B{\"u}lthoff},
  web_url = {http://www.psychologicalscience.org/convention/icps_program/pdf/Poster-Session-V.pdf},
  event_place = {Amsterdam, The Netherlands},
  event_name = {International Convention of Psychological Science (ICPS 2015)},
  author = {Saulton, A and Dodds, T and B{\"u}lthoff, HH}
}

@Poster { SaultonDBd2014,
  title = {Body and objects representations are associated with similar distortions},
  journal = {Journal of Vision},
  year = {2014},
  month = {5},
  day = {19},
  volume = {14},
  number = {10},
  pages = {845},
  abstract = {Stored representations of body size and shape as derived from somatosensation (body model) are considered to be critical components of perception and action. It is commonly believed that the body model can be measured using a localization task and be distinguished from other visual representations of the body using a visual template matching task. Specifically, localization tasks have shown distorted hand representations consisting of an overestimation of hand width and an underestimation of finger length [Longo and Haggard, 2010, PNAS, 107(26), 11727-11732]. In contrast, template matching tasks indicate that visual hand representations (body image) do not show such distortions [Longo and Haggard, 2012, Acta Psychologica, 141, 164-168]. We examined the specificity of the localization and visual template matching tasks to measure body-related representations. Participants conducted a localization and a template matching task with objects (box, post-it, rake) and their own hand. The localization task revealed that all items' dimensions were significantly distorted (all p < .0018) except for the width of the hand and rake. In contrast, the template matching task indicated no significant differences between the estimated and actual item shape for all items (all p > 0.05) except for the box (p < 0.01), suggesting that the visual representation of items is almost veridical. Moreover, the performance across these tasks was significantly correlated for the hand and rake (p < .001). Overall, these results show that effects considered to be body-specific, i.e. distortions of the body model, are actually more general than previously thought, as they are also observed with objects. Because localizing points on an object is unlikely to be aided by somatosensation, the assessed representations are unlikely to be mainly based on somatosensation but might reflect more general cognitive processes, e.g. visual memory. These findings have important implications for the nature of the body image and the body model.},
  department = {Department B{\"u}lthoff},
  web_url = {http://www.journalofvision.org/content/14/10/845},
  event_place = {St. Pete Beach, FL, USA},
  event_name = {14th Annual Meeting of the Vision Sciences Society (VSS 2014)},
  DOI = {10.1167/14.10.845},
  author = {Saulton, A and Dodds, T and B{\"u}lthoff, HH and de la Rosa, S}
}

@Poster { VolkovaMDTB2013,
  title = {Perception of emotional body expressions in narrative scenarios},
  year = {2013},
  month = {8},
  pages = {135},
  abstract = {People use body motion to express and recognise emotions. We investigated whether emotional body expressions can be recognised when they are recorded during natural narration, where actors freely express the emotional colouring of a story told.
We then took only the upper body motion trajectories and presented them to participants in the form of animated stick figures. The observers were asked to categorise the emotions expressed in short motion sequences. The results show that recognition of the eleven emotions shown via the upper body is significantly above chance level and that the responses to motion sequences are consistent across observers.},
  department = {Department B{\"u}lthoff},
  web_url = {https://www.scss.tcd.ie/conferences/SAP2013/},
  publisher = {ACM Press},
  address = {New York, NY, USA},
  event_place = {Dublin, Ireland},
  event_name = {ACM Symposium on Applied Perception (SAP '13)},
  ISBN = {978-1-4503-2262-1},
  DOI = {10.1145/2492494.2501892},
  author = {Volkova, EK and Mohler, BJ and Dodds, T and Tesch, J and B{\"u}lthoff, HH}
}

@Poster { SaultonDTMB2013,
  title = {The influence of shape and culture on visual volume perception of virtual rooms},
  year = {2013},
  month = {8},
  pages = {142},
  abstract = {The ability of humans to apprehend the overall size or volume of an indoor space is not well understood. Previous research has highlighted a 'rectangularity illusion', in which rectangular rooms appear to be larger than square rooms of the same size (identical volume), showing that subjectively perceived space cannot be explained by the mathematical formula for volume, i.e. length \(\times\) width \(\times\) height. Instead, the results suggest that one might use the longest dimension of the space as a simplified strategy to assess room size [Sadalla and Oxley 1984].},
  url = {http://www.kyb.tuebingen.mpg.de/fileadmin/user_upload/files/publications/2013/SAP-2013-Saulton.pdf},
  department = {Department B{\"u}lthoff},
  web_url = {https://www.scss.tcd.ie/conferences/SAP2013/},
  publisher = {ACM Press},
  address = {New York, NY, USA},
  event_place = {Dublin, Ireland},
  event_name = {ACM Symposium on Applied Perception (SAP '13)},
  ISBN = {978-1-4503-2262-1},
  DOI = {10.1145/2492494.2501900},
  author = {Saulton, A and Dodds, TJ and Tesch, J and Mohler, BJ and B{\"u}lthoff, HH}
}

@Poster { 6997,
  title = {Changing our perception of communication in virtual environments},
  journal = {Perception},
  year = {2010},
  month = {8},
  volume = {39},
  number = {ECVP Abstract Supplement},
  pages = {183},
  abstract = {When people communicate face-to-face they use gestures and body language that naturally coincide with speech [McNeill, 2007, Gesture \& Thought, University of Chicago Press]. In an immersive virtual environment (VE) we can control both participants' visual feedback of self and of the other in order to investigate the effect of gestures on a communication task. In our experiment the communication task is to make the listener say a word without the speaker saying the word. We use animated real-time self-avatars in immersive VEs to answer the question: `Does the use of naturalistic gestures help communication in VEs?'. Specifically, we perform a within-subject experiment which investigates the influence of first- and third-person perspectives, and of animated speaker and listener. We find that people perform significantly better in the communication task when both the speaker and listener have an animated self-avatar and when the camera for the speaker shows a third-person perspective. When participants moved more they also performed better in the task. These results suggest that when two people in a VE are animated they do use gestures to communicate.
These results demonstrate that, in addition to the speaker's movements, the listener's movements are important for efficient communication in an immersive VE.},
  web_url = {http://pec.sagepub.com/content/39/1_suppl.toc},
  institute = {Biologische Kybernetik},
  organization = {Max-Planck-Gesellschaft},
  event_place = {Lausanne, Switzerland},
  event_name = {33rd European Conference on Visual Perception},
  language = {en},
  DOI = {10.1177/03010066100390S101},
  author = {Dodds, TJ and Mohler, BJ and B{\"u}lthoff, HH}
}

@Conference { Dodds2011,
  title = {Telecommunication in Virtual Reality with Self-animated Avatars},
  year = {2011},
  month = {6},
  department = {Department B{\"u}lthoff},
  web_url = {http://beaming-eu.org/beaming2011},
  institution = {Max Planck Institute for Biological Cybernetics},
  event_place = {Barcelona, Spain},
  event_name = {BEAMING 2011 Workshop: Real Actions in Virtual Environments (RAVE 2011)},
  author = {Dodds, TJ}
}

@Conference { 6998,
  title = {Communication in Virtual Environments},
  year = {2010},
  month = {10},
  day = {1},
  department = {Department B{\"u}lthoff},
  web_url = {http://www.interaction-design.org/references/conferences/proceedings_of_the_joint_virtual_reality_conference_of_egve_-_eurovr_-_vec.html},
  institute = {Biologische Kybernetik},
  organization = {Max-Planck-Gesellschaft},
  event_place = {Stuttgart, Germany},
  event_name = {2010 Joint Virtual Reality Conference of EuroVR - EGVE - VEC (JVRC 2010)},
  language = {en},
  author = {Dodds, TJ}
}