@inproceedings{AlizadehTolmieLeeetal.2024,
  author        = {Alizadeh, Fatemeh and Tolmie, Peter and Lee, Minha and Wintersberger, Philipp and Pins, Dominik and Stevens, Gunnar},
  editor        = {Dubiel and Leiva and others},
  title         = {Voice Assistants' Accountability through Explanatory Dialogues},
  booktitle     = {{CUI} '24: Proceedings of the 6th {ACM} Conference on Conversational User Interfaces, Luxembourg, July 8--10, 2024},
  publisher     = {Association for Computing Machinery},
  pages         = {29},
  year          = {2024},
  month         = jul,
  isbn          = {979-8-4007-0511-3},
  doi           = {10.1145/3640794.3665557},
  abstract      = {As voice assistants (VAs) become more advanced leveraging Large Language Models (LLMs) and natural language processing, their potential for accountable behavior expands. Yet, the long-term situational effectiveness of VAs' accounts when errors occur remains unclear. In our 19-month exploratory study with 19 households, we investigated the impact of an Alexa feature that allows users to inquire about the reasons behind its actions. Our findings indicate that Alexa's accounts are often single, decontextualized responses that led to users' alternative repair strategies over the long term, such as turning off the device, rather than initiating a dialogue about what went wrong. Through role-playing workshops, we demonstrate that VA interactions should facilitate explanatory dialogues as dynamic exchanges that consider a range of speech acts, recognizing users' emotional states and the context of interaction. We conclude by discussing the implications of our findings for the design of accountable VAs.},
  language      = {en},
  institution   = {Fachbereich Wirtschaftswissenschaften},
  internal-note = {NOTE(review): editor surnames taken from the original booktitle ("Dubiel, Leiva et al. (Eds.)"); first names not given in source -- complete from the ACM Digital Library. "institution" is not a standard @inproceedings field (ignored by styles); retained from the repository export.},
}