@mastersthesis{Vokuda2019,
  author      = {Vokuda, Priyanka Subramanya},
  title       = {Interactive Object Detection},
  type        = {Master Thesis},
  school      = {Hochschule Bonn-Rhein-Sieg},
  institution = {Fachbereich Informatik},
  series      = {Technical Report / Hochschule Bonn-Rhein-Sieg University of Applied Sciences. Department of Computer Science},
  number      = {01-2019},
  pages       = {vii, 65},
  year        = {2019},
  isbn        = {978-3-96043-072-8},
  issn        = {1869-5272},
  doi         = {10.18418/978-3-96043-072-8},
  abstract    = {The success of state-of-the-art object detection methods depend heavily on the availability of a large amount of annotated image data. The raw image data available from various sources are abundant but non-annotated. Annotating image data is often costly, time-consuming or needs expert help. In this work, a new paradigm of learning called Active Learning is explored which uses user interaction to obtain annotations for a subset of the dataset. The goal of active learning is to achieve superior object detection performance with images that are annotated on demand. To realize active learning method, the trade-off between the effort to annotate (annotation cost) unlabeled data and the performance of object detection model is minimised. Random Forests based method called Hough Forest is chosen as the object detection model and the annotation cost is calculated as the predicted false positive and false negative rate. The framework is successfully evaluated on two Computer Vision benchmark and two Carl Zeiss custom datasets. Also, an evaluation of RGB, HoG and Deep features for the task is presented. Experimental results show that using Deep features with Hough Forest achieves the maximum performance. By employing Active Learning, it is demonstrated that performance comparable to the fully supervised setting can be achieved by annotating just 2.5\% of the images. To this end, an annotation tool is developed for user interaction during Active Learning.},
  language    = {en},
}