BibTeX export

@article{Schaible2020,
 title = {Evaluation Infrastructures for Academic Shared Tasks: Requirements and Concept Design for Search and Recommendation Scenarios},
 author = {Schaible, Johann and Breuer, Timo and Tavakolpoursaleh, Narges and Müller, Bernd and Wolff, Benjamin and Schaer, Philipp},
 journal = {Datenbank-Spektrum : Zeitschrift für Datenbanktechnologien und Information Retrieval},
 number = {1},
 pages = {29--36},
 volume = {20},
 year = {2020},
 issn = {1610-1995},
 doi = {10.1007/s13222-020-00335-x},
 urn = {urn:nbn:de:0168-ssoar-86702-6},
 abstract = {Academic search systems aid users in finding information covering specific topics of scientific interest and have evolved from early catalog-based library systems to modern web-scale systems. However, evaluating the performance of the underlying retrieval approaches remains a challenge. An increasing number of requirements for producing accurate retrieval results have to be considered, e.g., close integration of the system’s users. Due to these requirements, small to mid-size academic search systems cannot evaluate their retrieval system in-house. Evaluation infrastructures for shared tasks alleviate this situation. They allow researchers to experiment with retrieval approaches in specific search and recommendation scenarios without building their own infrastructure. In this paper, we elaborate on the benefits and shortcomings of four state-of-the-art evaluation infrastructures on search and recommendation tasks concerning the following requirements: support for online and offline evaluations, domain specificity of shared tasks, and reproducibility of experiments and results. In addition, we introduce an evaluation infrastructure concept design aiming at reducing the shortcomings in shared tasks for search and recommender systems.},
 keywords = {information retrieval}}
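
For reference, a minimal sketch of citing this entry from a LaTeX document, assuming the record above is saved as references.bib (the filename and the citing sentence are illustrative assumptions, not part of the export):

\documentclass{article}
\begin{document}
Evaluation infrastructures for academic shared tasks are analyzed by
Schaible et al.~\cite{Schaible2020}.
% references.bib holds the @article{Schaible2020, ...} record shown above
\bibliographystyle{plain}
\bibliography{references}
\end{document}

Compile with pdflatex, then bibtex, then pdflatex twice so the citation and bibliography resolve.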