@techreport{HerpersSaitov2014,
  author      = {Rainer Herpers and Timur Saitov},
  title       = {6-MIG Project: Multi-User Interaction System for CAVE-type VR Environments},
  isbn        = {978-3-96043-016-2},
  issn        = {1869-5272},
  doi         = {10.18418/978-3-96043-016-2},
  url         = {https://nbn-resolving.org/urn:nbn:de:hbz:1044-opus-266},
  institution = {Fachbereich Informatik},
  series      = {Technical Report / University of Applied Sciences Bonn-Rhein-Sieg. Department of Computer Science},
  pages       = {64},
  year        = {2014},
  abstract    = {The objective of this research project is to develop a user-friendly and cost-effective interactive input device that allows intuitive and efficient manipulation of 3D objects (6 DoF) in virtual reality (VR) visualization environments with flat projection walls. During this project, it was planned to develop an extended version of a laser pointer with multiple laser beams arranged in specific patterns. Using stationary cameras that observe projections of these patterns from behind the screens, an algorithm is to be developed for reconstructing the emitter's absolute position and orientation in space. The laser pointer concept is an intuitive way of interaction that provides the user with familiar, mobile and efficient navigation through a 3D environment. In order to navigate in a 3D world, it is necessary to know the absolute position (x, y and z) and orientation (roll, pitch and yaw angles) of the device, a total of 6 degrees of freedom (DoF). Ordinary laser pointers, when captured on a flat surface with a video camera system and then processed, provide only x and y coordinates, effectively reducing the available input to 2 DoF. To overcome this problem, an additional set of multiple (invisible) laser pointers should be used in the pointing device. These laser pointers should be arranged so that the projection of their rays forms a fixed dot pattern where they intersect the flat surface of the projection screens. Images of this pattern will be captured by a real-time camera-based system and then processed using mathematical re-projection algorithms, allowing the reconstruction of the full absolute 3D pose (6 DoF) of the input device. Additionally, the system should support multi-user or collaborative work, allowing several users to interact with a virtual environment at the same time. Possibilities to port the processing algorithms onto embedded processors or FPGAs will be investigated during this project as well.},
  language    = {en}
}