@inproceedings{AlizadehEsauStevensetal.2020,
  author    = {Alizadeh, Fatemeh and Esau, Margarita and Stevens, Gunnar and Cassens, Lena},
  title     = {{eXplainable} {AI}: Take One Step Back, Move Two Steps Forward},
  booktitle = {Mensch und Computer 2020 -- Workshopband},
  editor    = {Hansen and N{\"u}rnberger and others},
  publisher = {Gesellschaft f{\"u}r Informatik e.V.},
  address   = {Bonn},
  year      = {2020},
  doi       = {10.18420/muc2020-ws111-369},
  abstract  = {In 1991 the researchers at the center for the Learning Sciences of Carnegie Mellon University were confronted with the confusing question of ``where is AI'' from the users, who were interacting with AI but did not realize it. Three decades of research and we are still facing the same issue with the AI technology users. In the lack of users' awareness and mutual understanding of AI-enabled systems between designers and users, informal theories of the users about how a system works (``Folk theories'') become inevitable but can lead to misconceptions and ineffective interactions. To shape appropriate mental models of AI-based systems, explainable AI has been suggested by AI practitioners. However, a profound understanding of the current users' perception of AI is still missing. In this study, we introduce the term ``Perceived AI'' as ``AI defined from the perspective of its users''. We then present our preliminary results from deep-interviews with 50 AI technology users, which provide a framework for our future research approach towards a better understanding of PAI and users' folk theories.},
  language  = {en},
}