@inproceedings{0642138ec63d45199b0bcf2e895a76c6,
title = "VLR-Bench: Multilingual Benchmark Dataset for Vision-Language Retrieval Augmented Generation",
abstract = "We propose the VLR-BENCH, a visual question answering (VQA) benchmark for evaluating vision language models (VLMs) based on retrieval augmented generation (RAG). Unlike existing evaluation datasets for external knowledge-based VQA, the proposed VLR-BENCH includes five input passages. This allows testing of the ability to determine which passage is useful for answering a given query, a capability lacking in previous research. In this context, we constructed a dataset of 32,000 automatically generated instruction-following examples, which we denote as VLR-IF. This dataset is specifically designed to enhance the RAG capabilities of VLMs by enabling them to learn how to generate appropriate answers based on input passages. We evaluated the validity of the proposed benchmark and training data and verified its performance using the state-of-the-art Llama3-based VLM, the Llava-Llama-3 model. The proposed VLR-BENCH1 and VLR-IF2 datasets are publicly available online.",
author = "Hyeonseok Lim and Dongjae Shin and Seohyun Song and Inho Won and Minjun Kim and Junghun Yuk and Haneol Jang and Lim, \{Kyung Tae\}",
note = "Publisher Copyright: {\textcopyright} 2025 Association for Computational Linguistics.; 31st International Conference on Computational Linguistics, COLING 2025 ; Conference date: 19-01-2025 Through 24-01-2025",
year = "2025",
language = "English",
series = "Proceedings - International Conference on Computational Linguistics, COLING",
publisher = "Association for Computational Linguistics (ACL)",
pages = "6150--6168",
editor = "Owen Rambow and Leo Wanner and Marianna Apidianaki and Hend Al-Khalifa and \{Di Eugenio\}, Barbara and Steven Schockaert",
booktitle = "Main Conference",
}