enoriega committed
Commit d60afdd
1 Parent(s): 70ebd26

Upload odinsynth_dataset.py

Files changed (1)
  1. odinsynth_dataset.py +153 -0
odinsynth_dataset.py ADDED
@@ -0,0 +1,153 @@
+ import csv
+ import json
+ import os
+ from collections import defaultdict
+
+ import datasets
+ from tqdm import tqdm
+
+ _DESCRIPTION = """\
+ Supervised training data for odinsynth
+ """
+
+
+
+ class OdinsynthDatasetBuilder(datasets.GeneratorBasedBuilder):
+
+     VERSION = datasets.Version("1.0.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the dataset with configurable options,
+     # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configuration in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+     # BUILDER_CONFIGS = [
+     #     datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
+     #     datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
+     # ]
+     #
+     # DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+
+
+
+     def _info(self):
+         # This method specifies the datasets.DatasetInfo object, which contains the information and typings for the dataset
+
+         features = datasets.Features(
+             {
+                 "rule_id": datasets.Value("int32"),
+                 "source": datasets.Value("string"),
+                 "destination": datasets.Value("string"),
+                 "spec": datasets.Sequence(datasets.Value("string")),
+                 "step": datasets.Value("int8"),
+                 "length": datasets.Value("int8")
+                 # These are the features of your dataset like images, labels ...
+             }
+         )
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,  # Defined above
+             # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
+             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             # homepage=_HOMEPAGE,
+             # # License for the dataset if available
+             # license=_LICENSE,
+             # # Citation for the dataset
+             # citation=_CITATION,
+         )
+
+     def _build_specs(self, path: str):
+         id_to_rule = {}
+         specs = defaultdict(set)
+         with open(path) as f:
+             for l in tqdm(f, desc="Pre-computing specs"):
+                 try:
+                     instance = json.loads(l)
+                     rule_id = int(instance['id'])
+                     rule = instance['question']
+                     sent = instance['context']
+                     specs[rule].add(sent)
+                     id_to_rule[rule_id] = rule
+                 except (json.JSONDecodeError, KeyError, ValueError):
+                     # TODO: log the malformed line instead of silently skipping it
+                     pass
+
+         return {rule_id: specs[rule] for rule_id, rule in id_to_rule.items()}
+
+
+
+     def _split_generators(self, dl_manager):
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         JSON_PATH = os.path.join('merged_train_split_train.jsonl')
+         # Each split gets its own iterator over the archive so that a shared iterator is not exhausted by the first split
+         specs = self._build_specs(JSON_PATH)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "archive_iter": dl_manager.iter_archive('data.tar.bz2'),
+                     "specs": specs,
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "archive_iter": dl_manager.iter_archive('data.tar.bz2'),
+                     "specs": specs,
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "archive_iter": dl_manager.iter_archive('data.tar.bz2'),
+                     "specs": specs,
+                     "split": "val",
+                 },
+             ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, archive_iter, specs, split):
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+
+         key = 0
+         for tsv_path, file in archive_iter:
+             if tsv_path.startswith(split) and tsv_path.endswith(".tsv"):
+                 # Decode the archived bytes and parse the tab-separated rows
+                 reader = csv.reader((l.decode() for l in file), delimiter='\t')
+                 for row in reader:
+                     rule_id = int(row[0])
+                     if rule_id in specs:
+                         yield key, {
+                             "rule_id": rule_id,
+                             "source": row[1],
+                             "destination": row[2],
+                             "spec": sorted(specs[rule_id]),  # sets are unordered; emit a deterministic list
+                             "step": int(row[3]),
+                             "length": int(row[4]),
+                         }
+                         # Increase the key after yielding the instance
+                         key += 1
+
+
+ if __name__ == "__main__":
+     ds = OdinsynthDatasetBuilder()
+     ds.download_and_prepare()
+     print(ds.cache_dir)