@inproceedings{928102bb7b7441beb8e0e321c1fc7351,
title = "Human-Centric Ontology Evaluation: Process and Tool Support",
abstract = "As ontologies enable advanced intelligent applications, ensuring their correctness is crucial. While many quality aspects can be automatically verified, some evaluation tasks can only be solved with human intervention. Nevertheless, there is currently no generic methodology or tool support available for human-centric evaluation of ontologies. This leads to high efforts for organizing such evaluation campaigns as ontology engineers are neither guided in terms of the activities to follow nor do they benefit from tool support. To address this gap, we propose HERO - a Human-Centric Ontology Evaluation PROcess, capturing all preparation, execution and follow-up activities involved in such verifications. We further propose a reference architecture of a support platform, based on HERO. We perform a case-study-centric evaluation of HERO and its reference architecture and observe a decrease in the manual effort up to 88% when ontology engineers are supported by the proposed artifacts versus a manual preparation of the evaluation.",
keywords = "ontology evaluation, process model, human-in-the-loop",
author = "Stefani Tsaneva and Klemens K{\"a}sznar and Sabou, {Reka Marta}",
year = "2022",
month = sep,
day = "20",
doi = "10.1007/978-3-031-17105-5_14",
language = "English",
isbn = "978-3-031-17104-8",
series = "Lecture Notes in Computer Science",
publisher = "Springer",
pages = "182--197",
editor = "Oscar Corcho and Laura Hollink and Oliver Kutz and Nicolas Troquard and {J. Ekaputra}, Fajar",
booktitle = "Knowledge Engineering and Knowledge Management",
address = "Germany",
edition = "1",
}