parquet-converter committed on
Commit
a90d1e7
1 Parent(s): 015ac88

Update parquet files

Browse files
.gitattributes DELETED
@@ -1,53 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ftz filter=lfs diff=lfs merge=lfs -text
6
- *.gz filter=lfs diff=lfs merge=lfs -text
7
- *.h5 filter=lfs diff=lfs merge=lfs -text
8
- *.joblib filter=lfs diff=lfs merge=lfs -text
9
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
- *.lz4 filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tflite filter=lfs diff=lfs merge=lfs -text
29
- *.tgz filter=lfs diff=lfs merge=lfs -text
30
- *.wasm filter=lfs diff=lfs merge=lfs -text
31
- *.xz filter=lfs diff=lfs merge=lfs -text
32
- *.zip filter=lfs diff=lfs merge=lfs -text
33
- *.zst filter=lfs diff=lfs merge=lfs -text
34
- *tfevents* filter=lfs diff=lfs merge=lfs -text
35
- # Audio files - uncompressed
36
- *.pcm filter=lfs diff=lfs merge=lfs -text
37
- *.sam filter=lfs diff=lfs merge=lfs -text
38
- *.raw filter=lfs diff=lfs merge=lfs -text
39
- # Audio files - compressed
40
- *.aac filter=lfs diff=lfs merge=lfs -text
41
- *.flac filter=lfs diff=lfs merge=lfs -text
42
- *.mp3 filter=lfs diff=lfs merge=lfs -text
43
- *.ogg filter=lfs diff=lfs merge=lfs -text
44
- *.wav filter=lfs diff=lfs merge=lfs -text
45
- # Image files - uncompressed
46
- *.bmp filter=lfs diff=lfs merge=lfs -text
47
- *.gif filter=lfs diff=lfs merge=lfs -text
48
- *.png filter=lfs diff=lfs merge=lfs -text
49
- *.tiff filter=lfs diff=lfs merge=lfs -text
50
- # Image files - compressed
51
- *.jpg filter=lfs diff=lfs merge=lfs -text
52
- *.jpeg filter=lfs diff=lfs merge=lfs -text
53
- *.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md DELETED
@@ -1,40 +0,0 @@
1
- ---
2
- annotations_creators:
3
- - expert-generated
4
- language_creators:
5
- - crowdsourced
6
- language:
7
- - en
8
- license:
9
- - apache-2.0
10
- multilinguality:
11
- - monolingual
12
- pretty_name: 'wep-probes'
13
- size_categories:
14
- - 1K<n<10K
15
- source_datasets:
16
- - original
17
- task_categories:
18
- - multiple-choice
19
- - question-answering
20
- task_ids:
21
- - open-domain-qa
22
- - multiple-choice-qa
23
- - natural-language-inference
24
- tags:
25
- - wep
26
- - words of estimative probability
27
- - probability
28
- - logical reasoning
29
- - soft logic
30
- ---
31
-
32
- # Dataset accompanying the "Probing neural language models for understanding of words of estimative probability" article
33
- ```bib
34
- @article{sileo2022probing,
35
- title={Probing neural language models for understanding of words of estimative probability},
36
- author={Sileo, Damien and Moens, Marie-Francine},
37
- journal={arXiv preprint arXiv:2211.03358},
38
- year={2022}
39
- }
40
- ```
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
reasoning_1hop/wep-probes-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:778985882089f3e41e552e11d6ba9b00bd3e25d049ce91e4e1ea9cbff18987ba
3
+ size 65721
reasoning_1hop/wep-probes-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e0264612cfc0f6c8ebd69ff7871899a91625cdf1a4d80e02c5f1f48d2c7fecf2
3
+ size 486972
reasoning_1hop/wep-probes-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0a43b4daca3ab709eaf20182cef36d267ef715f088943fbe5febdf994e593613
3
+ size 65916
reasoning_2hop/wep-probes-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0dfad832ae6aaac812b7b6c8d27b860a33f6929d7f8106ec7ef41c5aaee70e7f
3
+ size 101115
reasoning_2hop/wep-probes-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:69b8e237c979238a530463d678d447fa6e14030518618fb3b75e251ceae4fb23
3
+ size 760991
reasoning_2hop/wep-probes-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1f704f44f9929fb11c1b850f02abf37f55e7691e2e7ae5b880ff18a33c4d6bab
3
+ size 100602
usnli/wep-probes-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b2b94d22af1d3861190c8461d2e0e76509617bb1a79f4c394cd4bc46373d789e
3
+ size 573436
usnli/wep-probes-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ab5b7ed77c63fc5dc37b42f6a7d530e61501f1a9b4a3d6b2b7c874ba1023a813
3
+ size 8882325
usnli/wep-probes-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec541db840165e09c1a9c3105bab934f270b81d253fefebc37e6d590433f8a67
3
+ size 578493
wep-probes.py DELETED
@@ -1,132 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
-
18
-
19
- import csv
20
- import os
21
- import textwrap
22
- import numpy as np
23
- import datasets
24
- import pandas as pd
25
-
26
-
27
- _CITATION = """\
28
- @article{sileo2022probing,
29
- title={Probing neural language models for understanding of words of estimative probability},
30
- author={Sileo, Damien and Moens, Marie-Francine},
31
- journal={arXiv preprint arXiv:2211.03358},
32
- year={2022}
33
- }
34
- """
35
-
36
- _DESCRIPTION = """\
37
- Probing neural language models for understanding of words of estimative probability
38
- """
39
-
40
- URL = 'https://sileod.s3.eu-west-3.amazonaws.com/probability_words/'
41
-
42
-
43
class WepProbeConfig(datasets.BuilderConfig):
    """BuilderConfig for WepProbe.

    Args:
        data_dir: Subdirectory/prefix naming this configuration's data files
            (e.g. ``"reasoning_1hop"``); used to build the download URL.
        label_classes: Optional list of class names for the ``label`` feature.
            Defaults to ``['valid', 'invalid']``. (Bug fix: this argument was
            previously accepted but silently ignored; it is now honored while
            keeping identical behavior for callers that do not pass it.)
        process_label: Callable applied to raw label values; identity by default.
        **kwargs: Forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(
        self,
        data_dir,
        label_classes=None,
        process_label=lambda x: x,
        **kwargs,
    ):
        super().__init__(version=datasets.Version("1.0.5", ""), **kwargs)
        # Identity mapping: dataset column name -> feature name.
        self.text_features = {
            k: k
            for k in [
                'context',
                'hypothesis',
                'valid_hypothesis',
                'invalid_hypothesis',
                'probability_word',
                'distractor',
                'hypothesis_assertion',
            ]
        }
        self.label_column = 'label'
        # Fall back to the historical hard-coded classes so existing
        # configurations behave exactly as before.
        self.label_classes = label_classes if label_classes is not None else ['valid', 'invalid']
        self.data_url = URL
        self.url = URL
        self.data_dir = data_dir
        self.citation = _CITATION
        self.process_label = process_label
64
-
65
class WepProbe(datasets.GeneratorBasedBuilder):
    """Evaluation of words-of-estimative-probability (WEP) understanding.

    Each configuration (``reasoning_1hop``, ``reasoning_2hop``, ``usnli``)
    downloads three CSV splits from ``URL`` and exposes them as
    train/validation/test.
    """

    BUILDER_CONFIGS = [
        WepProbeConfig(name="reasoning_1hop", data_dir="reasoning_1hop"),
        WepProbeConfig(name="reasoning_2hop", data_dir="reasoning_2hop"),
        WepProbeConfig(name="usnli", data_dir="usnli"),
    ]

    def _info(self):
        """Return the DatasetInfo: text features, label, idx, probability."""
        features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
        if self.config.label_classes:
            features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
        else:
            # No class names configured -> treat the label as a regression target.
            features["label"] = datasets.Value("float32")
        features["idx"] = datasets.Value("int32")
        features["probability"] = datasets.Value("float32")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            homepage=self.config.url,
            # Bug fix: previously `self.config.citation + "\n" + _CITATION`,
            # which emitted the same citation twice (config.citation is
            # already _CITATION).
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager):
        """Download the three CSV splits and map them onto SplitGenerators.

        Args:
            dl_manager: ``datasets`` download manager used to fetch the CSVs.

        Returns:
            A list of train/validation/test ``SplitGenerator`` objects.
        """
        data_files = []
        # Download order fixes the index mapping used below.
        for split in ['train', 'validation', 'test']:
            url = f'{URL}{self.config.data_dir}_{split}.csv'
            data_files.append(dl_manager.download(url))
        # (Debug print() calls removed from library code.)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_file": data_files[0], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # "dev" kept for backward compatibility; _generate_examples
                # does not branch on it.
                gen_kwargs={"data_file": data_files[1], "split": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_file": data_files[2], "split": "test"},
            ),
        ]

    def _generate_examples(self, data_file, split):
        """Yield ``(idx, example_dict)`` pairs from one CSV split.

        Args:
            data_file: Local path to the downloaded CSV file.
            split: Split tag from gen_kwargs (currently unused).
        """
        # Drop bookkeeping columns if present; errors='ignore' tolerates
        # splits where they are absent.
        df = pd.read_csv(data_file).drop(['rnd', 'split', '_'], axis=1, errors='ignore')
        df['idx'] = df.index
        for idx, example in df.iterrows():
            yield idx, dict(example)
132
-