sileod committed on
Commit 653c6d4
1 Parent(s): 231a52f

Upload wep-probes.py

Files changed (1): wep-probes.py (+129, -0)
wep-probes.py ADDED
@@ -0,0 +1,129 @@
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3

import datasets
import pandas as pd

_CITATION = """\
Probing neural language models for understanding of words of estimative probability
Anonymous submission
"""

_DESCRIPTION = """\
Probing neural language models for understanding of words of estimative probability
Anonymous submission
"""

URL = "https://sileod.s3.eu-west-3.amazonaws.com/probability_words/"


class WepProbeConfig(datasets.BuilderConfig):
    """BuilderConfig for WepProbe."""

    def __init__(
        self,
        data_dir,
        label_classes=None,
        process_label=lambda x: x,
        **kwargs,
    ):
        super(WepProbeConfig, self).__init__(version=datasets.Version("1.0.5", ""), **kwargs)
        # Text columns are passed through unchanged from the source CSV files.
        self.text_features = {
            k: k
            for k in [
                "context",
                "hypothesis",
                "valid_hypothesis",
                "invalid_hypothesis",
                "probability_word",
                "distractor",
                "hypothesis_assertion",
            ]
        }
        self.label_column = "label"
        self.label_classes = ["valid", "invalid"]
        self.data_url = URL
        self.url = URL
        self.data_dir = data_dir
        self.citation = _CITATION
        self.process_label = process_label


class WepProbe(datasets.GeneratorBasedBuilder):
    """Evaluation of understanding of words of estimative probability."""

    BUILDER_CONFIGS = [
        WepProbeConfig(name="reasoning_1hop", data_dir="reasoning_1hop"),
        WepProbeConfig(name="reasoning_2hop", data_dir="reasoning_2hop"),
        WepProbeConfig(name="usnli", data_dir="usnli"),
    ]

    def _info(self):
        features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
        if self.config.label_classes:
            features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
        else:
            features["label"] = datasets.Value("float32")
        features["idx"] = datasets.Value("int32")
        features["probability"] = datasets.Value("float32")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager):
        # Each split lives in its own CSV named "{data_dir}_{split}.csv" under URL.
        data_files = [
            dl_manager.download(f"{URL}{self.config.data_dir}_{split}.csv")
            for split in ["train", "validation", "test"]
        ]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_file": data_files[0], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_file": data_files[1], "split": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_file": data_files[2], "split": "test"},
            ),
        ]

    def _generate_examples(self, data_file, split):
        # Drop bookkeeping columns that are not part of the dataset schema.
        df = pd.read_csv(data_file).drop(["rnd", "split", "_"], axis=1, errors="ignore")
        df["idx"] = df.index
        for idx, example in df.iterrows():
            yield idx, dict(example)
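For downstream use, here is a minimal loading sketch (not part of the committed script). It assumes the loader is published on the Hugging Face Hub under the hypothetical repository id "sileod/wep-probes"; substitute the actual id if it differs. Each config (reasoning_1hop, reasoning_2hop, usnli) exposes train, validation, and test splits.

from datasets import load_dataset

# NOTE: "sileod/wep-probes" is an assumed repository id, used for illustration only.
dataset = load_dataset("sileod/wep-probes", "reasoning_1hop")
print(dataset)              # DatasetDict with train / validation / test splits
print(dataset["train"][0])  # fields: context, hypothesis, ..., label, probability

Alternatively, a single split can be fetched directly with pandas via the URL pattern used in _split_generators, e.g. pd.read_csv("https://sileod.s3.eu-west-3.amazonaws.com/probability_words/usnli_train.csv").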