parquet-converter committed on
Commit
fae1593
•
1 Parent(s): 92d883f

Update parquet files

Browse files
.gitattributes DELETED
@@ -1,55 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ftz filter=lfs diff=lfs merge=lfs -text
6
- *.gz filter=lfs diff=lfs merge=lfs -text
7
- *.h5 filter=lfs diff=lfs merge=lfs -text
8
- *.joblib filter=lfs diff=lfs merge=lfs -text
9
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
- *.lz4 filter=lfs diff=lfs merge=lfs -text
11
- *.model filter=lfs diff=lfs merge=lfs -text
12
- *.msgpack filter=lfs diff=lfs merge=lfs -text
13
- *.npy filter=lfs diff=lfs merge=lfs -text
14
- *.npz filter=lfs diff=lfs merge=lfs -text
15
- *.onnx filter=lfs diff=lfs merge=lfs -text
16
- *.ot filter=lfs diff=lfs merge=lfs -text
17
- *.parquet filter=lfs diff=lfs merge=lfs -text
18
- *.pb filter=lfs diff=lfs merge=lfs -text
19
- *.pickle filter=lfs diff=lfs merge=lfs -text
20
- *.pkl filter=lfs diff=lfs merge=lfs -text
21
- *.pt filter=lfs diff=lfs merge=lfs -text
22
- *.pth filter=lfs diff=lfs merge=lfs -text
23
- *.rar filter=lfs diff=lfs merge=lfs -text
24
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
25
- *.tar.* filter=lfs diff=lfs merge=lfs -text
26
- *.tflite filter=lfs diff=lfs merge=lfs -text
27
- *.tgz filter=lfs diff=lfs merge=lfs -text
28
- *.wasm filter=lfs diff=lfs merge=lfs -text
29
- *.xz filter=lfs diff=lfs merge=lfs -text
30
- *.zip filter=lfs diff=lfs merge=lfs -text
31
- *.zst filter=lfs diff=lfs merge=lfs -text
32
- *tfevents* filter=lfs diff=lfs merge=lfs -text
33
- # Audio files - uncompressed
34
- *.pcm filter=lfs diff=lfs merge=lfs -text
35
- *.sam filter=lfs diff=lfs merge=lfs -text
36
- *.raw filter=lfs diff=lfs merge=lfs -text
37
- # Audio files - compressed
38
- *.aac filter=lfs diff=lfs merge=lfs -text
39
- *.flac filter=lfs diff=lfs merge=lfs -text
40
- *.mp3 filter=lfs diff=lfs merge=lfs -text
41
- *.ogg filter=lfs diff=lfs merge=lfs -text
42
- *.wav filter=lfs diff=lfs merge=lfs -text
43
- # Image files - uncompressed
44
- *.bmp filter=lfs diff=lfs merge=lfs -text
45
- *.gif filter=lfs diff=lfs merge=lfs -text
46
- *.png filter=lfs diff=lfs merge=lfs -text
47
- *.tiff filter=lfs diff=lfs merge=lfs -text
48
- # Image files - compressed
49
- *.jpg filter=lfs diff=lfs merge=lfs -text
50
- *.jpeg filter=lfs diff=lfs merge=lfs -text
51
- *.webp filter=lfs diff=lfs merge=lfs -text
52
- test/xor_test_full_q_only_v1_1.jsonl filter=lfs diff=lfs merge=lfs -text
53
- test/xor_test_retrieve_eng_span_q_only_v1_1.jsonl filter=lfs diff=lfs merge=lfs -text
54
- dev/xor_dev_full_v1_1.jsonl filter=lfs diff=lfs merge=lfs -text
55
- dev/xor_dev_retrieve_eng_span_v1_1.jsonl filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
test/xor_test_retrieve_eng_span_q_only_v1_1.jsonl → eng_span/xor-tydi-dev.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b0da54e12de5b3fc5eb67c6fc02e6ef96a7f42fcdc9adc86f9a1ec3dd9fc4649
3
- size 362453
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:84027603eb3389629ea85b262475c5c7fc22b6678fc96771e3781a32db7f5caf
3
+ size 197407
dev/xor_dev_full_v1_1.jsonl → eng_span/xor-tydi-test.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:2f5145e3bb9e58fd5d25bbb32d289dfa59405999f523295bf80bdecb9e74271e
3
- size 715496
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e0d61031d7cd67e85cd3165b30175081f58455f70e0ca521d9e1a860684d7699
3
+ size 157260
eng_span/xor-tydi-train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fddfca152846e31700822db1db9fea0fc4016cbbc2960f4870359e3d7f37d080
3
+ size 311313002
eng_span/xor-tydi-train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ab604217995f8295bde52982c025c3632cbcbc76cf4123fabda43afb1c733d4
3
+ size 190212770
dev/xor_dev_retrieve_eng_span_v1_1.jsonl → full/xor-tydi-dev.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:cfe569e4c6d3539c2111d97249f900318f9273b246a61fff95216ce2a2e1505c
3
- size 426987
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:041b358433b85ea3cd1500c9c14fcf4364dc586c49e8329867a98d4c3a6a0161
3
+ size 329170
test/xor_test_full_q_only_v1_1.jsonl → full/xor-tydi-test.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4c1010182a8f5c8845b6e54a6b22ce2927000820f31fca66be77ec402eea3252
3
- size 1175899
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ac8ec89ddc4a4ac284e65df0b976717349b89aef499e9ab0be3bdba83adfd52
3
+ size 492064
full/xor-tydi-train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fddfca152846e31700822db1db9fea0fc4016cbbc2960f4870359e3d7f37d080
3
+ size 311313002
full/xor-tydi-train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ab604217995f8295bde52982c025c3632cbcbc76cf4123fabda43afb1c733d4
3
+ size 190212770
train/.gitattributes DELETED
@@ -1 +0,0 @@
1
- xor-t2e-100w.jsonl.gz filter=lfs diff=lfs merge=lfs -text
 
 
train/xor-t2e-100w.jsonl.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:aef9dc8c6bbf6fe94d5c751af2601cf48b4561a5712381d12178c0a1eba9e6e0
3
- size 312241526
 
 
 
 
xor-tydi.py DELETED
@@ -1,137 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """Wikipedia NQ dataset."""
18
-
19
- import json
20
-
21
- import datasets
22
-
23
- _CITATION = """
24
- @inproceedings{xorqa,
25
- title = {{XOR} {QA}: Cross-lingual Open-Retrieval Question Answering},
26
- author = {Akari Asai and Jungo Kasai and Jonathan H. Clark and Kenton Lee and Eunsol Choi and Hannaneh Hajishirzi},
27
- booktitle={NAACL-HLT},
28
- year = {2021}
29
- }
30
- """
31
-
32
- _DESCRIPTION = "dataset load script for Wikipedia NQ"
33
-
34
- base = "/home/czhang/src/task-sparse/tevatron/hgf_datasets/xor-tydi"
35
- _DATASET_URLS = {
36
- 'eng_span': {
37
- 'train': f'https://huggingface.co/datasets/Tevatron/xor-tydi/resolve/main/train/xor-t2e-100w.jsonl.gz',
38
- 'dev': f'https://huggingface.co/datasets/Tevatron/xor-tydi/resolve/main/dev/xor_dev_retrieve_eng_span_v1_1.jsonl',
39
- 'test': f'https://huggingface.co/datasets/Tevatron/xor-tydi/resolve/main/test/xor_test_retrieve_eng_span_q_only_v1_1.jsonl',
40
- },
41
- 'full': {
42
- 'train': f'https://huggingface.co/datasets/Tevatron/xor-tydi/resolve/main/train/xor-t2e-100w.jsonl.gz',
43
- 'dev': f'https://huggingface.co/datasets/Tevatron/xor-tydi/resolve/main/dev/xor_dev_full_v1_1.jsonl',
44
- 'test': f'https://huggingface.co/datasets/Tevatron/xor-tydi/resolve/main/test/xor_test_full_q_only_v1_1.jsonl',
45
- }
46
- # 'test': f"{base}",
47
- }
48
-
49
-
50
- class XORTyDi(datasets.GeneratorBasedBuilder):
51
- VERSION = datasets.Version("0.0.1")
52
-
53
- BUILDER_CONFIGS = [
54
- datasets.BuilderConfig(
55
- version=VERSION,
56
- name="eng_span",
57
- description="XOR-TyDI train/dev/test datasets of English Span Task"),
58
- datasets.BuilderConfig(
59
- version=VERSION,
60
- name="full",
61
- description="XOR-TyDI train/dev/test datasets of Full Task"),
62
- ]
63
-
64
- def _info(self):
65
- features = datasets.Features({
66
- 'query_id': datasets.Value('string'),
67
- 'query': datasets.Value('string'),
68
- 'answers': [datasets.Value('string')],
69
- 'positive_passages': [
70
- {'docid': datasets.Value('string'), 'text': datasets.Value('string'),
71
- 'title': datasets.Value('string')}
72
- ],
73
- 'negative_passages': [
74
- {'docid': datasets.Value('string'), 'text': datasets.Value('string'),
75
- 'title': datasets.Value('string')}
76
- ],
77
- })
78
- return datasets.DatasetInfo(
79
- # This is the description that will appear on the datasets page.
80
- description=_DESCRIPTION,
81
- # This defines the different columns of the dataset and their types
82
- features=features, # Here we define them above because they are different between the two configurations
83
- supervised_keys=None,
84
- # Homepage of the dataset for documentation
85
- homepage="",
86
- # License for the dataset if available
87
- license="",
88
- # Citation for the dataset
89
- citation=_CITATION,
90
- )
91
-
92
- def _split_generators(self, dl_manager):
93
- group = self.config.name
94
- if self.config.data_files:
95
- downloaded_files = self.config.data_files
96
- else:
97
- downloaded_files = dl_manager.download_and_extract(_DATASET_URLS[group])
98
- splits = [
99
- datasets.SplitGenerator(
100
- name=split,
101
- gen_kwargs={
102
- "files": [downloaded_files[split]] if isinstance(downloaded_files[split], str) else downloaded_files[split],
103
- },
104
- ) for split in downloaded_files
105
- ]
106
- return splits
107
-
108
- def _generate_examples(self, files):
109
- """Yields examples."""
110
- def process_train_entry(data):
111
- if data.get('negative_passages') is None:
112
- data['negative_passages'] = []
113
- if data.get('positive_passages') is None:
114
- data['positive_passages'] = []
115
- if data.get('answers') is None:
116
- data['answers'] = []
117
- return data['query_id'], data
118
-
119
- def process_dev_test_entry(data):
120
- return data["id"], {
121
- "query_id": data["id"],
122
- "query": data["question"],
123
- "answers": data.get("answers", []),
124
- "positive_passages": [],
125
- "negative_passages": [],
126
- }
127
-
128
- for filepath in files:
129
- with open(filepath, encoding="utf-8") as f:
130
- for line in f:
131
- data = json.loads(line)
132
-
133
- if "id" in data and "query_id" not in data:
134
- yield process_dev_test_entry(data)
135
- else:
136
- yield process_train_entry(data)
137
-