# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import datasets
import pandas as pd
_CITATION = """\
Probing neural language models for understanding of words of estimative probability
Anonymous submission
"""

_DESCRIPTION = """\
Probing neural language models for understanding of words of estimative probability
Anonymous submission
"""
URL = 'https://sileod.s3.eu-west-3.amazonaws.com/probability_words/'
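# Each configuration's data is hosted under URL as three CSV files named
# "{data_dir}_{split}.csv" (e.g. reasoning_1hop_train.csv); _split_generators
# below downloads one such file per split.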

class WepProbeConfig(datasets.BuilderConfig):
    """BuilderConfig for WepProbe."""

    def __init__(
        self,
        data_dir,
        label_classes=None,
        process_label=lambda x: x,
        **kwargs,
    ):
        super().__init__(version=datasets.Version("1.0.5", ""), **kwargs)
        # Column names are identical in the source CSVs and the generated examples.
        self.text_features = {
            k: k
            for k in [
                "context",
                "hypothesis",
                "valid_hypothesis",
                "invalid_hypothesis",
                "probability_word",
                "distractor",
                "hypothesis_assertion",
            ]
        }
        self.label_column = "label"
        # Honor an explicitly passed label_classes; default to binary validity labels.
        self.label_classes = label_classes if label_classes is not None else ["valid", "invalid"]
        self.data_url = URL
        self.url = URL
        self.data_dir = data_dir
        self.citation = _CITATION
        self.process_label = process_label

class WepProbe(datasets.GeneratorBasedBuilder):
    """Evaluation of understanding of words of estimative probability."""

    BUILDER_CONFIGS = [
        WepProbeConfig(name="reasoning_1hop", data_dir="reasoning_1hop"),
        WepProbeConfig(name="reasoning_2hop", data_dir="reasoning_2hop"),
        WepProbeConfig(name="usnli", data_dir="usnli"),
    ]
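    # The active configuration is selected by name at load time, e.g.
    # datasets.load_dataset("wep_probe.py", "usnli"), where "wep_probe.py" is a
    # hypothetical local filename for this script.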
    def _info(self):
        features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
        if self.config.label_classes:
            # Classification configs: the label is a ClassLabel over the configured classes.
            features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
        else:
            # Otherwise the label is a raw float score.
            features["label"] = datasets.Value("float32")
        features["idx"] = datasets.Value("int32")
        features["probability"] = datasets.Value("float32")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation,
        )
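    # Resulting schema: seven string columns, a "label" (a valid/invalid
    # ClassLabel by default, a float otherwise), an integer "idx", and a float
    # "probability", presumably the scalar probability associated with the
    # example's probability word.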
    def _split_generators(self, dl_manager):
        # Download the train/validation/test CSVs for the active configuration.
        data_files = []
        for split in ["train", "validation", "test"]:
            url = f"{URL}{self.config.data_dir}_{split}.csv"
            data_files.append(dl_manager.download(url))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_file": data_files[0], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_file": data_files[1], "split": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_file": data_files[2], "split": "test"},
            ),
        ]
    def _generate_examples(self, data_file, split):
        # Drop bookkeeping columns that may be present in the CSVs;
        # errors="ignore" makes the drop a no-op when a column is absent.
        df = pd.read_csv(data_file).drop(["rnd", "split", "_"], axis=1, errors="ignore")
        df["idx"] = df.index
        for idx, example in df.iterrows():
            yield idx, dict(example)
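

if __name__ == "__main__":
    # Minimal smoke test, a sketch rather than part of the original script:
    # loading this file directly requires the S3 bucket above to be reachable,
    # and recent `datasets` versions may additionally need trust_remote_code=True.
    dataset = datasets.load_dataset(__file__, "reasoning_1hop")
    print(dataset)
    print(dataset["train"][0])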