Xinyu Crystina ZHANG committed on
Commit
72ffcc6
1 Parent(s): 4505323
Files changed (3)
  1. .gitattributes +1 -0
  2. mmarco-train-bi.py +110 -0
  3. spanish.json.gz +3 -0
.gitattributes CHANGED
@@ -52,3 +52,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ spanish.json.gz filter=lfs diff=lfs merge=lfs -text
mmarco-train-bi.py ADDED
@@ -0,0 +1,110 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """mMARCO Passage dataset."""
+
+ import json
+
+ import datasets
+
+ _CITATION = """
+ """
+
+ _DESCRIPTION = "dataset load script for mMARCO bilingual-training datasets"
+
+ languages = [
+     "spanish"
+ ]
+ _DATASET_URLS = {
+     lang: {
+         'train': f"https://huggingface.co/datasets/crystina-z/mmarco-train-bi/resolve/main/{lang}.jsonl.gz",
+     } for lang in languages
+ }
+
+
+ class MMarcoPassage(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [datasets.BuilderConfig(
+         version=datasets.Version("0.0.1"),
+         name=lang,
+         description=f"mMARCO bilingual-training datasets for {lang}"
+     ) for lang in languages
+     ]
+
+     def _info(self):
+         features = datasets.Features({
+             'query_id': datasets.Value('string'),
+             'query_source': datasets.Value('string'),
+             'query_target': datasets.Value('string'),
+             'positive_passages_source': [
+                 {'docid': datasets.Value('string'), 'title': datasets.Value('string'), 'text': datasets.Value('string')}
+             ],
+             'positive_passages_target': [
+                 {'docid': datasets.Value('string'), 'title': datasets.Value('string'), 'text': datasets.Value('string')}
+             ],
+             'negative_passages_source': [
+                 {'docid': datasets.Value('string'), 'title': datasets.Value('string'), 'text': datasets.Value('string')}
+             ],
+             'negative_passages_target': [
+                 {'docid': datasets.Value('string'), 'title': datasets.Value('string'), 'text': datasets.Value('string')}
+             ]
+         })
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,  # Here we define them above because they are different between the two configurations
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="",
+             # License for the dataset if available
+             license="",
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         lang = self.config.name
+         downloaded_files = dl_manager.download_and_extract(_DATASET_URLS[lang])
+         '''
+         if self.config.data_files:
+             downloaded_files = self.config.data_files
+         else:
+             downloaded_files = dl_manager.download_and_extract(_DATASET_URLS)
+         '''
+         splits = [
+             datasets.SplitGenerator(
+                 name=split,
+                 gen_kwargs={
+                     "files": [downloaded_files[split]] if isinstance(downloaded_files[split], str) else
+                     downloaded_files[split],
+                 },
+             ) for split in downloaded_files
+         ]
+         return splits
+
+     def _generate_examples(self, files):
+         """Yields examples."""
+         for filepath in files:
+             with open(filepath, encoding="utf-8") as f:
+                 for line in f:
+                     data = json.loads(line)
+                     if data.get('negative_passages_source') is None:
+                         data['negative_passages_source'] = []
+                         data['negative_passages_target'] = []
+                     if data.get('positive_passages_source') is None:
+                         data['positive_passages_source'] = []
+                         data['positive_passages_target'] = []
+                     yield data['query_id'], data
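
For reference, a minimal usage sketch of how this script would typically be consumed with the datasets library. The repo id and config name are taken from the download URL and the `languages` list above; exact load_dataset behavior may vary with the installed datasets version, so treat this as illustrative rather than definitive.

    import datasets

    # The config name must be one of the entries in `languages` ("spanish" here).
    ds = datasets.load_dataset("crystina-z/mmarco-train-bi", "spanish", split="train")

    example = ds[0]
    print(example["query_id"])
    print(example["query_source"], "->", example["query_target"])
    print(len(example["positive_passages_source"]), "positive /",
          len(example["negative_passages_source"]), "negative passages")
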
spanish.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b5b4d679ecc1a24b8a785307ac9c5d689c4f4a837171f04e62c34df02cb8d8b
+ size 3418669249
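
The pointer above records only the object's sha256 and byte size; the actual data lives in LFS storage. A small illustrative sketch (the local path is hypothetical) for checking that a downloaded spanish.json.gz matches the recorded hash and size:

    import hashlib
    import os

    EXPECTED_SHA256 = "4b5b4d679ecc1a24b8a785307ac9c5d689c4f4a837171f04e62c34df02cb8d8b"
    EXPECTED_SIZE = 3418669249  # bytes, from the LFS pointer above

    path = "spanish.json.gz"  # hypothetical local path to the downloaded file

    sha = hashlib.sha256()
    with open(path, "rb") as f:
        # Hash in 1 MiB chunks to avoid loading the ~3.4 GB file into memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)

    assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
    assert sha.hexdigest() == EXPECTED_SHA256, "sha256 mismatch"
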