---
license: other
dataset_info:
  - config_name: 2WikiMultihopQA
    features:
      - name: _id
        dtype: string
      - name: type
        dtype: string
      - name: question
        dtype: string
      - name: context
        sequence:
          - name: title
            dtype: string
          - name: content
            sequence: string
      - name: supporting_facts
        sequence:
          - name: title
            dtype: string
          - name: sent_id
            dtype: int32
      - name: evidences
        sequence:
          - name: fact
            dtype: string
          - name: relation
            dtype: string
          - name: entity
            dtype: string
      - name: answer
        dtype: string
    splits:
      - name: train
        num_bytes: 662142981
        num_examples: 167454
      - name: dev
        num_bytes: 54346346
        num_examples: 12576
      - name: test
        num_bytes: 51639331
        num_examples: 12576
    download_size: 389826062
    dataset_size: 768128658
  - config_name: MuSiQue
    features:
      - name: id
        dtype: string
      - name: paragraphs
        list:
          - name: idx
            dtype: int64
          - name: title
            dtype: string
          - name: paragraph_text
            dtype: string
          - name: is_supporting
            dtype: bool
      - name: question
        dtype: string
      - name: question_decomposition
        list:
          - name: id
            dtype: int64
          - name: question
            dtype: string
          - name: answer
            dtype: string
          - name: paragraph_support_idx
            dtype: int64
      - name: answer
        dtype: string
      - name: answer_aliases
        sequence: string
      - name: answerable
        dtype: bool
      - name: text_all
        dtype: string
      - name: text_all_support
        dtype: string
    splits:
      - name: validation
        num_bytes: 55971326
        num_examples: 2417
    download_size: 23776203
    dataset_size: 55971326
  - config_name: NQ
    features:
      - name: id
        dtype: string
      - name: title
        dtype: string
      - name: document
        dtype: string
      - name: question
        dtype: string
      - name: long_answers
        sequence: string
      - name: short_answers
        sequence: string
      - name: retrieved_passages
        sequence: string
    splits:
      - name: validation
        num_bytes: 279214996
        num_examples: 4289
    download_size: 141438208
    dataset_size: 279214996
  - config_name: hotpotqa
    features:
      - name: id
        dtype: string
      - name: question
        dtype: string
      - name: answer
        dtype: string
      - name: type
        dtype: string
      - name: level
        dtype: string
      - name: supporting_facts
        sequence:
          - name: title
            dtype: string
          - name: sent_id
            dtype: int32
      - name: context
        sequence:
          - name: title
            dtype: string
          - name: sentences
            sequence: string
      - name: rag
        sequence: string
      - name: retrieved_passages
        sequence: string
    splits:
      - name: validation
        num_bytes: 131225660
        num_examples: 7405
    download_size: 77113296
    dataset_size: 131225660
  - config_name: triviaqa
    features:
      - name: question
        dtype: string
      - name: question_id
        dtype: string
      - name: question_source
        dtype: string
      - name: entity_pages
        sequence:
          - name: doc_source
            dtype: string
          - name: filename
            dtype: string
          - name: title
            dtype: string
          - name: wiki_context
            dtype: string
      - name: search_results
        sequence:
          - name: description
            dtype: string
          - name: filename
            dtype: string
          - name: rank
            dtype: int32
          - name: title
            dtype: string
          - name: url
            dtype: string
          - name: search_context
            dtype: string
      - name: answer
        struct:
          - name: aliases
            sequence: string
          - name: normalized_aliases
            sequence: string
          - name: matched_wiki_entity_name
            dtype: string
          - name: normalized_matched_wiki_entity_name
            dtype: string
          - name: normalized_value
            dtype: string
          - name: type
            dtype: string
          - name: value
            dtype: string
      - name: retrieved_passages
        sequence: string
    splits:
      - name: validation
        num_bytes: 474767227
        num_examples: 7993
    download_size: 262352984
    dataset_size: 474767227
  - config_name: truthfulqa
    features:
      - name: question
        dtype: string
      - name: mc1_targets
        struct:
          - name: choices
            sequence: string
          - name: labels
            sequence: int32
      - name: mc2_targets
        struct:
          - name: choices
            sequence: string
          - name: labels
            sequence: int32
      - name: category
        dtype: string
      - name: source
        dtype: string
      - name: website_data
        dtype: string
      - name: retrieved_passages
        sequence: string
    splits:
      - name: validation
        num_bytes: 24476993
        num_examples: 817
    download_size: 10176147
    dataset_size: 24476993
configs:
  - config_name: 2WikiMultihopQA
    data_files:
      - split: train
        path: 2WikiMultihopQA/train-*
      - split: dev
        path: 2WikiMultihopQA/dev-*
      - split: test
        path: 2WikiMultihopQA/test-*
  - config_name: MuSiQue
    data_files:
      - split: validation
        path: MuSiQue/validation-*
  - config_name: NQ
    data_files:
      - split: validation
        path: NQ/validation-*
  - config_name: boolq
    data_files:
      - split: validation
        path: boolq/validation-*
  - config_name: hotpotqa
    data_files:
      - split: validation
        path: hotpotqa/validation-*
  - config_name: triviaqa
    data_files:
      - split: validation
        path: triviaqa/validation-*
  - config_name: truthfulqa
    data_files:
      - split: validation
        path: truthfulqa/validation-*
---

ContextualBench - A comprehensive toolkit to evaluate LMs on different contextual datasets

Evaluation Code: SalesforceAIResearch/SFR-RAG

Description

ContextualBench is a powerful evaluation framework designed to assess the performance of Large Language Models (LLMs) on contextual datasets. It provides a flexible pipeline for evaluating various LLM families across different tasks, with a focus on handling large context inputs.

Users need to make their own assessment regarding any obligations or responsibilities under the corresponding licenses or terms and conditions pertaining to the original datasets and data.

Features

  • Dynamic Retrieval Support: Efficiently handles large context inputs, allowing for comprehensive evaluation of LLMs' contextual understanding capabilities.
  • Extensive Evaluation Datasets: Supports 7 contextual tasks, including Question Answering (QA), Multi-Hop Question Answering, and classification tasks; each task is available as a separate configuration (see the loading sketch below).
  • Multi-LLM Family Support: Compatible with a wide range of LLM families, including Hugging Face models, Gemma, Mistral, OpenAI, and Cohere.
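
The configurations listed in the metadata above can be loaded with the Hugging Face datasets library. A minimal sketch, assuming this card is hosted as Salesforce/ContextualBench (a guess at the repository id; substitute the actual Hub path if it differs):

```python
from datasets import load_dataset

# Assumed (hypothetical) repository id -- replace with the actual Hub path of this card.
REPO_ID = "Salesforce/ContextualBench"

# Every config_name listed in the metadata above is a separate configuration.
hotpot = load_dataset(REPO_ID, "hotpotqa", split="validation")
wiki2 = load_dataset(REPO_ID, "2WikiMultihopQA", split="dev")

print(hotpot[0]["question"])
print(wiki2[0]["answer"])
```

Each configuration exposes the splits declared in the configs block (train/dev/test for 2WikiMultihopQA, validation for the others).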

Component Datasets of ContextualBench

Users need to make their own assessment regarding any obligations or responsibilities under the corresponding licenses or terms and conditions pertaining to the original datasets and data.

2WikiMultihopQA

This dataset is a multihop question answering task, proposed in "Constructing A Multi-hop QA Dataset for Comprehensive Evaluation of Reasoning Steps" by Ho et al. The folder contains the evaluation script and the path to the dataset; evaluation is run on the validation split of around 12k samples.

@inproceedings{xanh2020_2wikimultihop,
    title = "Constructing A Multi-hop {QA} Dataset for Comprehensive Evaluation of Reasoning Steps",
    author = "Ho, Xanh  and
      Duong Nguyen, Anh-Khoa  and
      Sugawara, Saku  and
      Aizawa, Akiko",
    booktitle = "Proceedings of the 28th International Conference on Computational Linguistics",
    month = dec,
    year = "2020",
    address = "Barcelona, Spain (Online)",
    publisher = "International Committee on Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.coling-main.580",
    pages = "6609--6625",
}
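
The supporting_facts field pairs a paragraph title with a sentence index into that paragraph's content. A small illustrative sketch of recovering the gold sentences from one example, assuming the dict-of-lists layout that the sequence features above typically produce in the datasets library:

```python
def gold_sentences(example):
    """Collect the sentences referenced by supporting_facts (title, sent_id)."""
    # context arrives as parallel lists: one title per paragraph, one sentence list per paragraph.
    title_to_sents = dict(zip(example["context"]["title"],
                              example["context"]["content"]))
    gold = []
    for title, sent_id in zip(example["supporting_facts"]["title"],
                              example["supporting_facts"]["sent_id"]):
        sentences = title_to_sents.get(title, [])
        if 0 <= sent_id < len(sentences):
            gold.append(sentences[sent_id])
    return gold
```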

HotpotQA

HotpotQA is a Wikipedia-based dataset of question-answer pairs in which answering each question requires finding and reasoning over multiple supporting documents. We evaluate on 7,405 datapoints in the distractor setting. This dataset was proposed in the paper below:

@inproceedings{yang2018hotpotqa,
  title={{HotpotQA}: A Dataset for Diverse, Explainable Multi-hop Question Answering},
  author={Yang, Zhilin and Qi, Peng and Zhang, Saizheng and Bengio, Yoshua and Cohen, William W. and Salakhutdinov, Ruslan and Manning, Christopher D.},
  booktitle={Conference on Empirical Methods in Natural Language Processing ({EMNLP})},
  year={2018}
}
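
In the distractor setting the model is given every paragraph in context, gold and distractor alike. A hedged sketch of flattening one example's context field into a single prompt string (field names taken from the schema above):

```python
def build_context(example):
    """Concatenate every paragraph (title + sentences) into one string."""
    paragraphs = []
    for title, sentences in zip(example["context"]["title"],
                                example["context"]["sentences"]):
        paragraphs.append(title + "\n" + " ".join(sentences))
    return "\n\n".join(paragraphs)
```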

MuSiQue

MuSiQue is a multihop question answering task that requires 2-4 hops per question, making it somewhat harder than other multihop tasks. This dataset was proposed in the paper below:

@article{trivedi2021musique,
  title={{M}u{S}i{Q}ue: Multihop Questions via Single-hop Question Composition},
  author={Trivedi, Harsh and Balasubramanian, Niranjan and Khot, Tushar and Sabharwal, Ashish},
  journal={Transactions of the Association for Computational Linguistics},
  year={2022},
  publisher={MIT Press}
}
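
Each MuSiQue example marks its gold evidence with the is_supporting flag on every paragraph. A minimal sketch of separating gold from distractor paragraphs, assuming paragraphs is materialized as a list of dicts as the list feature above suggests:

```python
def split_paragraphs(example):
    """Return (supporting, distractor) paragraph texts for one example."""
    supporting = [p["paragraph_text"] for p in example["paragraphs"] if p["is_supporting"]]
    distractor = [p["paragraph_text"] for p in example["paragraphs"] if not p["is_supporting"]]
    return supporting, distractor
```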

NaturalQuestions

The NQ corpus contains questions from real users, and it requires QA systems to read and comprehend an entire Wikipedia article that may or may not contain the answer to the question.

@article{47761,
title	= {Natural Questions: a Benchmark for Question Answering Research},
author	= {Tom Kwiatkowski and Jennimaria Palomaki and Olivia Redfield and Michael Collins and Ankur Parikh and Chris Alberti and Danielle Epstein and Illia Polosukhin and Matthew Kelcey and Jacob Devlin and Kenton Lee and Kristina N. Toutanova and Llion Jones and Ming-Wei Chang and Andrew Dai and Jakob Uszkoreit and Quoc Le and Slav Petrov},
year	= {2019},
journal	= {Transactions of the Association of Computational Linguistics}
}
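
Because the provided article may or may not contain the answer, a quick sanity check is whether any gold short answer occurs verbatim in the document field. An illustrative sketch (not part of the official evaluation code):

```python
def document_contains_answer(example) -> bool:
    """True if any gold short answer occurs verbatim in the article text."""
    doc = example["document"].lower()
    return any(ans.lower() in doc for ans in example["short_answers"])
```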

PopQA

PopQA is a large-scale open-domain question answering (QA) dataset; we use its long-tail subset, consisting of 1,399 rare-entity queries whose monthly Wikipedia page views are below 100.

Make sure to cite the work

@article{mallen2023llm_memorization,
  title={When Not to Trust Language Models: Investigating Effectiveness and Limitations of Parametric and Non-Parametric Memories},
  author={Mallen, Alex and Asai, Akari and Zhong, Victor and Das, Rajarshi and Hajishirzi, Hannaneh and Khashabi, Daniel},
  journal={arXiv preprint},
  year={2022}
}

TriviaQA

TriviaQA is a reading comprehension dataset containing question-answer pairs authored by trivia enthusiasts and independently gathered evidence documents, six per question on average, that provide high-quality distant supervision for answering the questions.

@article{2017arXivtriviaqa,
       author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},
                 Daniel and {Zettlemoyer}, Luke},
        title = "{triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}",
      journal = {arXiv e-prints},
         year = 2017,
          eid = {arXiv:1705.03551},
        pages = {arXiv:1705.03551},
archivePrefix = {arXiv},
       eprint = {1705.03551},
}
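
The answer field is a struct whose normalized_aliases list is what exact-match scoring is usually compared against. A hedged sketch of such a check; normalize below is a hypothetical helper approximating a lower-cased, punctuation- and article-stripped normalization:

```python
import re
import string

def normalize(text: str) -> str:
    """Lower-case, drop articles and punctuation, collapse whitespace (hypothetical helper)."""
    text = text.lower()
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    text = text.translate(str.maketrans("", "", string.punctuation))
    return " ".join(text.split())

def exact_match(prediction: str, example) -> bool:
    """True if the normalized prediction matches any normalized gold alias."""
    gold = set(example["answer"]["normalized_aliases"]) | {example["answer"]["normalized_value"]}
    return normalize(prediction) in gold
```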

TruthfulQA

TruthfulQA is a benchmark to measure whether a language model is truthful in generating answers to questions. The benchmark comprises 817 questions that span 38 categories, including health, law, finance and politics. Questions are crafted so that some humans would answer falsely due to a false belief or misconception. To perform well, models must avoid generating false answers learned from imitating human texts.

@misc{lin2021truthfulqa,
    title={TruthfulQA: Measuring How Models Mimic Human Falsehoods},
    author={Stephanie Lin and Jacob Hilton and Owain Evans},
    year={2021},
    eprint={2109.07958},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
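
In the multiple-choice view, mc1_targets holds the candidate answers together with a parallel 0/1 label list containing exactly one correct choice. A minimal sketch of MC1 scoring, assuming you already have one model score (e.g., a log-likelihood) per choice:

```python
def mc1_correct(example, choice_scores) -> bool:
    """MC1: the top-scoring choice must be the one labelled 1."""
    labels = example["mc1_targets"]["labels"]  # one 1, rest 0
    best = max(range(len(choice_scores)), key=lambda i: choice_scores[i])
    return labels[best] == 1
```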

Citation

@article{nguyen2024sfrrag,
  title={SFR-RAG: Towards Contextually Faithful LLMs},
  author={Nguyen, Xuan-Phi and Pandit, Shrey and Purushwalkam, Senthil and Xu, Austin and Chen, Hailin and Ming, Yifei and Ke, Zixuan and Savarese, Silvio and Xiong, Caiming and Joty, Shafiq},
  year={2024}
}