icl_consistency_test / icl_consistency_test.py
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This 🤗 dataset provides data for the GenBench CBT task 'The ICL consistency test' (see https://github.com/GenBench/genbench_cbt/tree/main/src/genbench/tasks/icl_consistency_test).
The ICL consistency test measures the consistency of LLM predictions on the same data points across many different equivalent prompting setups.
The score in the associated metric (Cohen's kappa) can be understood as a measure of a model's prediction consistency in the face of task-irrelevant information.
For an easy evaluation of any 🤗 model, we refer to the code provided in the GenBench task. For in-depth information on the task, we refer to the associated
publications (Weber et al., 2023, listed below) and the respective GenBench doc.md (https://github.com/GenBench/genbench_cbt/blob/main/src/genbench/tasks/icl_consistency_test/doc.md).
Evaluation on the relevant metrics can be done via the example_evaluation.py script in the GenBench repository.
- Weber, L., Bruni, E., & Hupkes, D. (2023, December). Mind the instructions: a holistic evaluation of consistency and interactions in prompt-based learning.
In Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL) (pp. 294-313).
- Weber, L., Bruni, E., & Hupkes, D. (2023). The ICL Consistency Test. arXiv preprint arXiv:2312.04945.
"""
import json
import os
import datasets
_CITATION = """\
@inproceedings{weber2023mind,
title={Mind the instructions: a holistic evaluation of consistency and interactions in prompt-based learning},
author={Weber, Lucas and Bruni, Elia and Hupkes, Dieuwke},
booktitle={Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL)},
pages={294--313},
year={2023}
}
@article{weber2023icl,
title={The ICL Consistency Test},
author={Weber, Lucas and Bruni, Elia and Hupkes, Dieuwke},
journal={arXiv preprint arXiv:2312.04945},
year={2023}
}
"""
_DESCRIPTION = """\
In prompting, models are sensitive to task-irrelevant information in their prompt. This test can be used to quantify this sensitivity for any 🤗 model.
The ICL consistency test does this by measuring a model's prediction consistency across many different semantically equivalent prompting setups.
"""
_HOMEPAGE = "https://github.com/GenBench/genbench_cbt/blob/main/src/genbench/tasks/icl_consistency_test/doc.md"
_LICENSE = ""
_URL = "https://raw.githubusercontent.com/LucWeber/icl_consistency_data/main/data/" #"https://huggingface.co/datasets/LucasWeber/icl_consistency_test/blob/main/"
_URLS = {
"anli": _URL + "genbench_all_anli.jsonl",
"mnli": _URL + "genbench_all_glue%2Bmnli.jsonl",
}
class ICLConsistencyTest(datasets.GeneratorBasedBuilder):
"""
    In prompting, models are sensitive to task-irrelevant information in their prompt. This test can be used to quantify this sensitivity for any 🤗 model.
The ICL consistency test does this by measuring a model's prediction consistency across many different semantically equivalent prompting setups.
"""
VERSION = datasets.Version("1.1.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="anli", version=VERSION, description="This part of the ICL consistency test covers data points from ANLI"),
datasets.BuilderConfig(name="mnli", version=VERSION, description="This part of the ICL consistency test covers data points from MNLI"),
]
def _info(self):
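        # Each example pairs a full prompt ("input") with its gold label as text ("target") and as an
        # integer ("target_numeric"). "data_ID" identifies the underlying data point and "setup_ID" the
        # prompting setup, so the same data_ID recurs once per setup and prediction consistency can be
        # measured across setups.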
features = datasets.Features(
{
"input": datasets.Value("string"),
"target": datasets.Value("string"),
"target_numeric": datasets.Value("int32"),
"data_ID": datasets.Value("int32"),
"setup_ID": datasets.Value("string")
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=("input", "target"),
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
urls = _URLS[self.config.name]
data_dir = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"filepath": data_dir,
"split": "test"},
),
]
def _generate_examples(self, filepath, split):
with open(filepath, encoding="utf-8") as f:
for key, row in enumerate(f):
data = json.loads(row)
yield key, {
"input": data["input"],
"target": data["target"],
"target_numeric": data["target_numeric"],
"data_ID": data["data_ID"],
"setup_ID": data["setup_ID"],
}
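# A rough, illustrative sketch of how the Cohen's kappa consistency score described in the module
# docstring could be computed from model predictions. This is NOT the official evaluation (that lives
# in example_evaluation.py in the GenBench repository); it only assumes a mapping from each setup_ID
# to per-data_ID predictions and averages pairwise kappas between setups:
#
#     from itertools import combinations
#     from sklearn.metrics import cohen_kappa_score
#
#     def mean_pairwise_kappa(preds_by_setup):
#         """preds_by_setup: {setup_ID: {data_ID: predicted label}}, same data_IDs in every setup."""
#         data_ids = sorted(next(iter(preds_by_setup.values())))
#         kappas = [
#             cohen_kappa_score([preds_by_setup[a][i] for i in data_ids],
#                               [preds_by_setup[b][i] for i in data_ids])
#             for a, b in combinations(preds_by_setup, 2)
#         ]
#         return sum(kappas) / len(kappas)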