@inproceedings{YoussefKoineMueller2019,
  author    = {Youssef, Youssef Mahmoud and Koine, Linda and M{\"u}ller, Martin E.},
  title     = {Inducing Explainable Rules about Distributed Robotic Systems for Fault Detection \& Diagnosis},
  booktitle = {30th International Workshop on Principles of Diagnosis ({DX}'19), November 11-13, 2019, Klagenfurt, Austria},
  month     = nov,
  year      = {2019},
  abstract  = {This work presents the preliminary research towards developing an adaptive tool for fault detection and diagnosis of distributed robotic systems, using explainable machine learning methods. Autonomous robots are complex systems that require high reliability in order to operate in different environments. Even more so, when considering distributed robotic systems, the task of fault detection and diagnosis becomes exponentially difficult. To diagnose systems, models representing the behaviour under investigation need to be developed, and with distributed robotic systems generating large amount of data, machine learning becomes an attractive method of modelling especially because of its high performance. However, with current day methods such as artificial neural networks (ANNs), the issue of explainability arises where learnt models lack the ability to give explainable reasons behind their decisions. This paper presents current trends in methods for data collection from distributed systems, inductive logic programming (ILP); an explainable machine learning method, and fault detection and diagnosis.},
  language  = {en},
}