import csv
import json
import logging
from collections import defaultdict

import datasets
from tqdm import tqdm

_DESCRIPTION = """\
Supervised training data for odinsynth
"""



class OdinsynthDatasetBuilder(datasets.GeneratorBasedBuilder):
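    """Builder for the odinsynth supervised training data.

    Examples are read from per-split TSV archives and joined with the
    specification sentences and match spans pre-computed from a JSONL file
    (see `_build_specs`).
    """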

    VERSION = datasets.Version("1.0.0")

    def _info(self):
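        """Define the feature schema and metadata of the dataset."""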

        features = datasets.Features(
            {
                "rule_id": datasets.Value("int32"),
                "parent": datasets.Value("string"),
                "child": datasets.Value("string"),
                "negative_child": datasets.Value("string"),
                "spec": datasets.Sequence(datasets.Value("string")),
                "matches": datasets.Sequence(datasets.Sequence(datasets.Value("int16"))),
                "step": datasets.Value("int8"),
                "length": datasets.Value("int8")
            }
        )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the columns of the dataset and their types.
            features=features,
        )

    def _build_specs(self, path: str):
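        """Pre-compute the specification for every rule.

        Each line of the JSONL file at `path` holds a rule (`question`), a
        sentence (`context`), and a match span (`match_start`, `match_end`).
        Returns a dict mapping rule id -> (sentences, match spans).
        """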
        id_to_rule = {}
        specs = defaultdict(list)
        matches = defaultdict(list)
        with open(path) as f:
            for line in tqdm(f, desc="Pre-computing specs"):
                try:
                    instance = json.loads(line)
                    if instance['match']:
                        rule_id = int(instance['id'])
                        rule = instance['question']
                        sent = instance['context']
                        if sent not in specs[rule]:
                            specs[rule].append(sent)
                            matches[rule].append([instance['match_start'], instance['match_end']])
                            id_to_rule[rule_id] = rule
                except (json.JSONDecodeError, KeyError, ValueError):
                    # Skip malformed lines; a bare `except` here would also
                    # swallow KeyboardInterrupt and hide genuine bugs.
                    logger.warning("Skipping malformed line in %s", path)

        return {rule_id: (specs[rule], matches[rule]) for rule_id, rule in id_to_rule.items()}

    def _split_generators(self, dl_manager):
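        """Define the train/validation/test splits."""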
        # Download the pre-computed spec JSONL and the per-split TSV archives.

        JSON_PATH = dl_manager.download_and_extract('merged_train_split_train.jsonl.gz')
        TRAIN_ARCHIVE_PATH = dl_manager.download('train.tar.bz2')
        VAL_ARCHIVE_PATH = dl_manager.download('val.tar.bz2')
        TEST_ARCHIVE_PATH = dl_manager.download('test.tar.bz2')

        specs = self._build_specs(JSON_PATH)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "archive_iter": dl_manager.iter_archive(TRAIN_ARCHIVE_PATH),
                    "specs": specs,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "archive_iter": dl_manager.iter_archive(TEST_ARCHIVE_PATH),
                    "specs": specs,
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "archive_iter": dl_manager.iter_archive(VAL_ARCHIVE_PATH),
                    "specs": specs,
                    "split": "val",
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, archive_iter, specs, split):
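        """Yield (key, example) pairs from the TSV files in the archive.

        Expected TSV columns: rule_id, parent, child, negative_child, step, length.
        """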
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.

        key = 0
        for tsv_path, file in archive_iter:
            if tsv_path.endswith(".tsv"):
                # Read the lines
                reader = csv.reader((line.decode() for line in file), delimiter='\t')
                for row in reader:
                    rule_id = int(row[0])
                    if rule_id in specs:
                        spec, matches = specs[rule_id]
                        assert len(spec) == len(matches), f"Rule id {rule_id} has different number of sentences and matches"

                        yield key, {
                            "rule_id": rule_id,
                            "parent": row[1],
                            "child": row[2],
                            "negative_child": row[3],
                            "spec": spec,
                            "matches": matches,
                            "step": int(row[4]),
                            "length": int(row[5]),
                        }
                        # Increase the key after yielding the instance
                        key += 1


if __name__ == "__main__":
    ds = OdinsynthDatasetBuilder()
    ds.download_and_prepare()
    print(ds.cache_dir)
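
# Example of loading the prepared dataset (a sketch; assumes this script is
# saved as `odinsynth_dataset.py` alongside the data archives it references):
#
#   from datasets import load_dataset
#   ds = load_dataset("odinsynth_dataset.py")
#   print(ds["train"][0])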