

The Summer EXPO 2025 for HCI/HCS, CS and GE was a great success! A large number of visitors were able to experience up to 120 different demos and projects.

How do immersive technologies change the way we perceive and understand stories? The new DFG Research Training Group TESDA investigates this question. Prof. Marc Erich Latoschik and his team from the HCI Group are significantly involved with three subprojects on storytelling in immersive virtual reality.

The AIL team contributed to the event with three demonstrators, two questionnaires, and training material on AI literacy.
Recent Publications
Unobtrusive In-Situ Measurement of Behavior Change by Deep Metric Similarity Learning of Motion Patterns.
2025.
[Download] [BibSonomy]
,
[Download] [BibSonomy]
@misc{merz2025unobtrusiveinsitumeasurementbehavior,
  author     = {Christian Merz and Lukas Schach and Marie Luisa Fiedler and Jean-Luc Lugrin and Carolin Wienrich and Marc Erich Latoschik},
  title      = {Unobtrusive In-Situ Measurement of Behavior Change by Deep Metric Similarity Learning of Motion Patterns},
  year       = {2025},
  eprint     = {2509.04174},
  eprinttype = {arXiv},
  url        = {https://arxiv.org/abs/2509.04174},
}
Abstract:
Improving Mid-Air Sketching in Room-Scale Virtual Reality with Dynamic Color-to-Depth and Opacity Cues, In IEEE Transactions on Visualization and Computer Graphics.
2025. To be published.
[BibSonomy]
,
[BibSonomy]
@article{monty2025improving,
  author  = {Samantha Monty and Dennis Alexander Mevißen and Marc Erich Latoschik},
  title   = {Improving Mid-Air Sketching in Room-Scale Virtual Reality with Dynamic Color-to-Depth and Opacity Cues},
  journal = {IEEE Transactions on Visualization and Computer Graphics},
  year    = {2025},
  note    = {To be published},
}
Abstract:
Immersive 3D mid-air sketching systems liberate users from the confines of traditional 2D sketching canvases. However, complications from perceptual challenges in Virtual Reality (VR), combined with the ergonomic and cognitive challenges of sketching in all three dimensions in mid-air lower the accuracy and aesthetic quality of 3D sketches. This paper explores how color-to-depth and opacity cues support users to create and perceive freehand, 3D strokes in room-scale sketching, unlocking a full 360° of freedom for creation. We implemented three graphic depth shader cues modifying the (1) alpha, (2) hue, and (3) value levels of a single color to dynamically adjust the color and transparency of meshes relative to their depth from the user. We investigated how these depth cues influence sketch efficiency, sketch quality, and total sketch experience with 24 participants in a comparative, counterbalanced, 4 x 1 within-subjects user study. First, with our graphic depth shader cues we could successfully transfer results of prior research in seated sketching tasks to room-scale scenarios. Our color-to-depth cues improved the similarity of sketches to target models. This highlights the usefulness of the color-to-depth approach even for the increased range of motion and depth in room-scale sketching. Second, our shaders assisted participants to complete tasks faster, spend a greater percentage of task time sketching, reduced the feeling of mental tiredness and improved the feeling of sketch efficiency in room-scale sketching. We discuss these findings and share our insights and conclusions to advance the research on improving spatial cognition in immersive sketching systems.
The Stability of Plausibility and Presence in Claustrophobic Virtual Reality Exposure Therapy, In Proceedings of the Mensch Und Computer 2025, pp. 181–192. New York, NY, USA:
Association for Computing Machinery,
2025.
[Download] [BibSonomy] [Doi]
,
[Download] [BibSonomy] [Doi]
@inproceedings{noauthororeditor2025stability,
  author    = {Joanna Grause and Larissa Brübach and Franziska Westermeier and Carolin Wienrich and Marc Erich Latoschik},
  title     = {The Stability of Plausibility and Presence in Claustrophobic Virtual Reality Exposure Therapy},
  booktitle = {Proceedings of the Mensch Und Computer 2025},
  series    = {MuC '25},
  pages     = {181--192},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  year      = {2025},
  doi       = {10.1145/3743049.3743068},
}
Abstract:
The Impact of Performance-Specific Feedback from a Virtual Coach in a Virtual Reality Exercise Application, In IEEE International Symposium on Mixed and Augmented Reality (ISMAR).
IEEE Computer Society,
2025. (accepted for publication at ISMAR2025)
[BibSonomy]
,
[BibSonomy]
@inproceedings{zimmerer2025feedback,
  author    = {Andrea Zimmerer and Lydia Bartels and Marc Erich Latoschik},
  title     = {The Impact of Performance-Specific Feedback from a Virtual Coach in a Virtual Reality Exercise Application},
  booktitle = {IEEE International Symposium on Mixed and Augmented Reality (ISMAR)},
  publisher = {IEEE Computer Society},
  year      = {2025},
  note      = {Accepted for publication at ISMAR 2025},
}
Abstract:
Virtual reality (VR) exercise applications are promising tools, e.g., for at-home training and rehabilitation. However, existing applications vary significantly in key design choices such as environments, embodiment, and virtual coaching, making it difficult to derive clear design guidelines. A prominent design choice is the use of embodied virtual coaches, which guide user interaction and provide feedback. In a user study with 76 participants, we investigated how different levels of performance specificity in feedback from an embodied virtual coach affect intermediate factors, such as VR experience, motivation, and coach perception. Participants performed lower-body movement exercises, i.e., Leg Raises and Knee Extensions, commonly used in knee rehabilitation. We found that highly performance-specific feedback led to higher scores compared to medium specificity for perceived realism, as well as the anthropomorphism and sympathy of the virtual coach, but did not affect motivation. Based on our findings, we propose the design suggestion to include precise, performance-specific details when creating feedback for a virtual coach. We observed a descriptive pattern of higher scores in the low specificity condition compared to the medium condition on most measures, which raises the possibility that less specific feedback may, in some cases, be perceived more positively than moderately specific feedback. These findings provide valuable insights into how design choices impact relevant intermediate factors that are crucial for maximizing both workout effectiveness and the quality of the virtual coaching experience.
The Impact of AI-Based Real-Time Gesture Generation and Immersion on the Perception of Others and Interaction Quality in Social XR, In IEEE Transactions on Visualization and Computer Graphics.
2025. To be published.
[BibSonomy]
,
[BibSonomy]
@article{merz2025impact,
  author  = {Christian Merz and Niklas Krome and Carolin Wienrich and Stefan Kopp and Marc Erich Latoschik},
  title   = {The Impact of {AI-Based} Real-Time Gesture Generation and Immersion on the Perception of Others and Interaction Quality in Social {XR}},
  journal = {IEEE Transactions on Visualization and Computer Graphics},
  year    = {2025},
  note    = {To be published},
}
Abstract:
This study explores how people interact in dyadic social eXtended Reality (XR), focusing on two main factors: the animation type of a conversation partner’s avatar and how immersed the user feels in the virtual environment. Specifically, we investigate how 1) idle behavior, 2) AI-generated gestures, and 3) motion-captured movements from a confederate (a controlled partner in the study) influence the quality of conversation and how that partner is perceived. We examined these effects in both symmetric interactions (where both participants use VR headsets and controllers) and asymmetric interactions (where one participant uses a desktop setup). We developed a social XR platform that supports asymmetric device configurations to provide varying levels of immersion. The platform also supports a modular avatar animation system providing idle behavior, real-time AI-generated co-speech gestures, and full-body motion capture. Using a 2×3 mixed design with 39 participants, we measured users’ sense of spatial presence, their perception of the confederate, and the overall conversation quality. Our results show that users who were more immersed felt a stronger sense of presence and viewed their partner as more human-like and believable. Surprisingly, however, the type of avatar animation did not significantly affect conversation quality or how the partner was perceived. Participants often reported focusing more on what was said rather than how the avatar moved.
A Systematic Review of Fusion Methods for the User-Centered Design of Multimodal Interfaces, In Proceedings of the 27th International Conference on Multimodal Interaction (ICMI '25).
Association for Computing Machinery,
2025.
[BibSonomy]
,
[BibSonomy]
@inproceedings{heinrich2025systematic,
  author    = {Ronja Heinrich and Chris Zimmerer and Martin Fischbach and Marc Erich Latoschik},
  title     = {A Systematic Review of Fusion Methods for the User-Centered Design of Multimodal Interfaces},
  booktitle = {Proceedings of the 27th International Conference on Multimodal Interaction (ICMI '25)},
  publisher = {Association for Computing Machinery},
  year      = {2025},
}
Abstract:
This systematic review investigates the current state of research on multimodal fusion methods, i.e., the joint analysis of multimodal inputs, for intentional, instruction-based human-computer interactions, focusing on the combination of speech and spatially expressive modalities such as gestures, touch, pen, and gaze.
We examine 50 systems from a User-Centered Design perspective, categorizing them by modality combinations, fusion strategies, application domains and media, as well as reusability. Our findings highlight a predominance of descriptive late fusion methods, limited reusability, and a lack of standardized tool support, hampering rapid prototyping and broader applicability. We identify emerging trends in machine learning-based fusion and outline future research directions to advance reusable and user-centered multimodal systems.
The Interwoven Nature of Spatial Presence and Virtual Embodiment: A Comprehensive Perspective, In Frontiers in Virtual Reality, Vol. 6.
2025.
[Download] [BibSonomy] [Doi]
,
[Download] [BibSonomy] [Doi]
@article{halbig-interwoven,
  author  = {Andreas Halbig and Marc Erich Latoschik},
  title   = {The Interwoven Nature of Spatial Presence and Virtual Embodiment: A Comprehensive Perspective},
  journal = {Frontiers in Virtual Reality},
  volume  = {6},
  year    = {2025},
  doi     = {10.3389/frvir.2025.1616662},
  url     = {https://www.frontiersin.org/journals/virtual-reality/articles/10.3389/frvir.2025.1616662/full},
}
Abstract:
VIA-VR: A Platform to Streamline the Development of Virtual Reality Serious Games for Healthcare, In 2025 IEEE 38th International Symposium on Computer-Based Medical Systems (CBMS), pp. 463-468.
2025.
[Download] [BibSonomy] [Doi]
,
[Download] [BibSonomy] [Doi]
@inproceedings{11058787,
  author    = {Samuel Truman and Sebastian von Mammen},
  title     = {{VIA-VR}: A Platform to Streamline the Development of Virtual Reality Serious Games for Healthcare},
  booktitle = {2025 IEEE 38th International Symposium on Computer-Based Medical Systems (CBMS)},
  pages     = {463--468},
  year      = {2025},
  doi       = {10.1109/CBMS65348.2025.00098},
  url       = {https://ieeexplore.ieee.org/document/11058787},
}
Abstract: