Maxime committed on
Commit
cea0b24
1 Parent(s): 0fa3b34

add all configuration

Browse files
Files changed (2) hide show
  1. .gitignore +2 -1
  2. mfaq.py +25 -22
.gitignore CHANGED
@@ -1 +1,2 @@
1
- test.py
 
1
+ test.py
2
+ *.lock
mfaq.py CHANGED
@@ -42,14 +42,17 @@ _LICENSE = ""
42
 
43
  _LANGUAGES = ["cs", "da", "de", "en", "es", "fi", "fr", "he", "hr", "hu", "id", "it", "nl", "no", "pl", "pt", "ro", "ru", "sv", "tr", "vi"]
44
  _URLs = {}
45
- _URLs.update({f"{l}": {"train": f"data/{l}/train.jsonl", "valid": f"data/{l}/valid.jsonl"} for l in _LANGUAGES})
46
- _URLs.update({f"{l}_flat": {"train": f"data/{l}/train.jsonl", "valid": f"data/{l}/valid.jsonl"} for l in _LANGUAGES})
 
 
47
 
48
 
49
  class MFAQ(datasets.GeneratorBasedBuilder):
50
 
51
  VERSION = datasets.Version("1.0.0")
52
  BUILDER_CONFIGS = list(map(lambda x: datasets.BuilderConfig(name=x, version=datasets.Version("1.1.0")), _URLs.keys()))
 
53
 
54
  def _info(self):
55
  features = datasets.Features(
@@ -83,36 +86,36 @@ class MFAQ(datasets.GeneratorBasedBuilder):
83
  return [
84
  datasets.SplitGenerator(
85
  name=datasets.Split.TRAIN,
86
- gen_kwargs={"filepath": data_dir["train"], "split": "train"},
87
  ),
88
  datasets.SplitGenerator(
89
  name=datasets.Split.VALIDATION,
90
- gen_kwargs={"filepath": data_dir["valid"], "split": "valid"},
91
  ),
92
  ]
93
 
94
  def _generate_examples(
95
- self, filepath, split # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
96
  ):
97
  """ Yields examples as (key, example) tuples. """
98
-
99
- with open(filepath, encoding="utf-8") as f:
100
- for _id, row in enumerate(f):
101
- data = json.loads(row)
102
- if "flat" in self.config.name:
103
- for i, pair in enumerate(data["qa_pairs"]):
104
- yield f"{_id}_{i}", {
 
 
 
 
 
 
 
 
105
  "id": data["id"],
106
  "domain": data["domain"],
107
  "language": data["language"],
108
- "num_pairs": 1,
109
- "qa_pairs": [pair]
110
  }
111
- else:
112
- yield _id, {
113
- "id": data["id"],
114
- "domain": data["domain"],
115
- "language": data["language"],
116
- "num_pairs": data["num_pairs"],
117
- "qa_pairs": data["qa_pairs"]
118
- }
42
 
43
  _LANGUAGES = ["cs", "da", "de", "en", "es", "fi", "fr", "he", "hr", "hu", "id", "it", "nl", "no", "pl", "pt", "ro", "ru", "sv", "tr", "vi"]
44
  _URLs = {}
45
+ _URLs.update({f"{l}": {"train": [f"data/{l}/train.jsonl"], "valid": [f"data/{l}/valid.jsonl"]} for l in _LANGUAGES})
46
+ _URLs["all"] = {"train": [f"data/{l}/train.jsonl" for l in _LANGUAGES], "valid": [f"data/{l}/valid.jsonl" for l in _LANGUAGES]}
47
+ _URLs.update({f"{l}_flat": {"train": [f"data/{l}/train.jsonl"], "valid": [f"data/{l}/valid.jsonl"]} for l in _LANGUAGES})
48
+ _URLs["all_flat"] = {"train": [f"data/{l}/train.jsonl" for l in _LANGUAGES], "valid": [f"data/{l}/valid.jsonl" for l in _LANGUAGES]}
49
 
50
 
51
  class MFAQ(datasets.GeneratorBasedBuilder):
52
 
53
  VERSION = datasets.Version("1.0.0")
54
  BUILDER_CONFIGS = list(map(lambda x: datasets.BuilderConfig(name=x, version=datasets.Version("1.1.0")), _URLs.keys()))
55
+ DEFAULT_CONFIG_NAME = "all"
56
 
57
  def _info(self):
58
  features = datasets.Features(
86
  return [
87
  datasets.SplitGenerator(
88
  name=datasets.Split.TRAIN,
89
+ gen_kwargs={"filepaths": data_dir["train"], "split": "train"},
90
  ),
91
  datasets.SplitGenerator(
92
  name=datasets.Split.VALIDATION,
93
+ gen_kwargs={"filepaths": data_dir["valid"], "split": "valid"},
94
  ),
95
  ]
96
 
97
  def _generate_examples(
98
+ self, filepaths, split # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
99
  ):
100
  """ Yields examples as (key, example) tuples. """
101
+ for filepath in filepaths:
102
+ with open(filepath, encoding="utf-8") as f:
103
+ for _id, row in enumerate(f):
104
+ data = json.loads(row)
105
+ if "flat" in self.config.name:
106
+ for i, pair in enumerate(data["qa_pairs"]):
107
+ yield f"{filepath}_{_id}_{i}", {
108
+ "id": data["id"],
109
+ "domain": data["domain"],
110
+ "language": data["language"],
111
+ "num_pairs": 1,
112
+ "qa_pairs": [pair]
113
+ }
114
+ else:
115
+ yield f"{filepath}_{_id}", {
116
  "id": data["id"],
117
  "domain": data["domain"],
118
  "language": data["language"],
119
+ "num_pairs": data["num_pairs"],
120
+ "qa_pairs": data["qa_pairs"]
121
  }