# Loading script for the STS-es dataset (Semantic Textual Similarity in Spanish).
import csv
import hashlib
import os

import datasets
import pandas as pd
logger = datasets.logging.get_logger(__name__)
_CITATION = """
Eneko Agirre, Carmen Banea, Claire Cardie, Daniel Cer, Mona Diab, Aitor Gonzalez-Agirre, Weiwei Guo, Iñigo Lopez-Gazpio, Montse Maritxalar, Rada Mihalcea, German Rigau, Larraitz Uria, and Janyce Wiebe. 2015. SemEval-2015 Task 2: Semantic Textual Similarity, English, Spanish and Pilot on Interpretability. In Proceedings of the 9th International Workshop on Semantic Evaluation (SemEval 2015), pages 252–263, Denver, Colorado. Association for Computational Linguistics.
Eneko Agirre, Carmen Banea, Claire Cardie, Daniel Cer, Mona Diab, Aitor Gonzalez-Agirre, Weiwei Guo, Rada Mihalcea, German Rigau, and Janyce Wiebe. 2014. SemEval-2014 Task 10: Multilingual Semantic Textual Similarity. In Proceedings of the 8th International Workshop on Semantic Evaluation (SemEval 2014), pages 81–91, Dublin, Ireland. Association for Computational Linguistics.
"""
_DESCRIPTION = """
For Semantic Textual Similarity, we collected the Spanish test sets from SemEval-2014 (Agirre et al., 2014) and SemEval-2015 (Agirre et al., 2015). Since no training data was provided for the Spanish subtask, we randomly split the combined data into 1,321 sentence pairs for the train set, 78 pairs for the development set, and 156 pairs for the test set. To make the task harder for the models, we purposely made the development set smaller than the test set.
"""
_HOMEPAGE = """http://ixa2.si.ehu.eus/stswiki/index.php/Main_Page"""
_URL = "http://ixa2.si.ehu.es/stswiki/images/"
_FILE_1 = "9/9d/STS2015-es-test.zip"
_FILE_2 = "9/9a/STS2014-es-test.zip"
_LOC_HASHED_FILES = 'https://huggingface.co/datasets/PlanTL-GOB-ES/sts-es/resolve/main/hashed_dataset'
_TRAIN = "hashed_train.tsv"
_VALID = "hashed_valid.tsv"
_TEST = "hashed_test.tsv"
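
# Only hashed versions of the train/dev/test splits are distributed: each row in
# the TSV files above pairs the SHA-256 hash of a sentence pair with its gold
# similarity score. The original sentences are recovered at load time from the
# official SemEval zips.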
class STSConfig(datasets.BuilderConfig):
""" Builder config for the STS dataset """
def __init__(self, **kwargs):
"""BuilderConfig for STS.
Args:
**kwargs: keyword arguments forwarded to super.
"""
        super().__init__(**kwargs)
class STS(datasets.GeneratorBasedBuilder):
""" STS dataset."""
BUILDER_CONFIGS = [
STSConfig(
name="STS",
version=datasets.Version("1.0.0"),
description="STS dataset"
),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"sentence1": datasets.Value("string"),
"sentence2": datasets.Value("string"),
"label": datasets.features.Value('float')
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# download original official sentences dataset
urls_to_download = {
"file1": f"{_URL}{_FILE_1}",
"file2": f"{_URL}{_FILE_2}",
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
# create a hash-sentences dictionary with the original sentences
hashes_sentences_dict = {}
        for folder_path in downloaded_files.values():
for file_name in os.listdir(folder_path):
if 'STS.input' not in file_name:
continue
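                # Key each sentence pair by the SHA-256 of its two concatenated
                # sentences. The distributed hashed_*.tsv splits use the same
                # hashing, so their rows can be mapped back to the original text.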
                with open(os.path.join(folder_path, file_name), encoding='utf-8') as i_f:
                    lines = csv.reader(i_f, delimiter='\t', quoting=csv.QUOTE_NONE)
                    for line in lines:
                        hash_ = hashlib.sha256(f"{line[0]}{line[1]}".encode('utf-8')).hexdigest()
                        if hash_ in hashes_sentences_dict and hashes_sentences_dict[hash_] != line:
                            logger.warning(
                                "Hash %s is repeated.\nThis is produced by the following pair of "
                                "sentences:\n - %s\n - %s\nThis can cause problems!\n",
                                hash_, hashes_sentences_dict[hash_], line,
                            )
                        hashes_sentences_dict[hash_] = line
# generate splits
return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": f"{_LOC_HASHED_FILES}/{_TRAIN}", "hash_dict": hashes_sentences_dict}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": f"{_LOC_HASHED_FILES}/{_VALID}", "hash_dict": hashes_sentences_dict}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": f"{_LOC_HASHED_FILES}/{_TEST}", "hash_dict": hashes_sentences_dict}),
]
def _generate_examples(self, filepath, hash_dict):
logger.info("⏳ Generating examples from = %s", filepath)
df = pd.read_csv(filepath, delimiter='\t', quoting=csv.QUOTE_NONE)
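        # Each row holds the SHA-256 hash of a sentence pair and its gold similarity
        # score; the sentences themselves come from the hash dictionary built in
        # _split_generators.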
for index, line in df.iterrows():
            sentence1, sentence2 = hash_dict[line.iloc[0]]
            label = line.iloc[1]
yield index, {
"id": str(index),
"sentence1": sentence1,
"sentence2": sentence2,
"label": label
}
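
# A minimal usage sketch (assumes the `datasets` library is installed; the
# repository id below is taken from _LOC_HASHED_FILES):
#
#   from datasets import load_dataset
#   sts = load_dataset("PlanTL-GOB-ES/sts-es")
#   print(sts["train"][0])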