enoriega committed on
Commit
bda8295
1 Parent(s): 71fcf55

Added a column with the token interval of the matches for each spec sentence

Browse files
Files changed (1) hide show
  1. odinsynth_dataset.py +17 -29
odinsynth_dataset.py CHANGED
@@ -16,28 +16,7 @@ class OdinsynthDatasetBuilder(datasets.GeneratorBasedBuilder):
16
 
17
  VERSION = datasets.Version("1.0.0")
18
 
19
- # This is an example of a dataset with multiple configurations.
20
- # If you don't want/need to define several sub-sets in your dataset,
21
- # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
22
-
23
- # If you need to make complex sub-parts in the datasets with configurable options
24
- # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
25
- # BUILDER_CONFIG_CLASS = MyBuilderConfig
26
-
27
- # You will be able to load one or the other configurations in the following list with
28
- # data = datasets.load_dataset('my_dataset', 'first_domain')
29
- # data = datasets.load_dataset('my_dataset', 'second_domain')
30
- # BUILDER_CONFIGS = [
31
- # datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
32
- # datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
33
- # ]
34
- #
35
- # DEFAULT_CONFIG_NAME = "first_domain" # It's not mandatory to have a default configuration. Just use one if it make sense.
36
-
37
-
38
-
39
  def _info(self):
40
- # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
41
 
42
  features = datasets.Features(
43
  {
@@ -46,6 +25,7 @@ class OdinsynthDatasetBuilder(datasets.GeneratorBasedBuilder):
46
  "child": datasets.Value("string"),
47
  "negative_child": datasets.Value("string"),
48
  "spec": datasets.Sequence(datasets.Value("string")),
 
49
  "step": datasets.Value("int8"),
50
  "length": datasets.Value("int8")
51
  }
@@ -69,21 +49,25 @@ class OdinsynthDatasetBuilder(datasets.GeneratorBasedBuilder):
69
 
70
  def _build_specs(self, path:str):
71
  id_to_rule = {}
72
- specs = defaultdict(set)
 
73
  with open(path) as f:
74
  for l in tqdm(f, desc="Pre-computing specs"):
75
  try:
76
  instance = json.loads(l)
77
- rule_id = int(instance['id'])
78
- rule = instance['question']
79
- sent = instance['context']
80
- specs[rule].add(sent)
81
- id_to_rule[rule_id] = rule
 
 
 
82
  except:
83
  # TODO log
84
  pass
85
 
86
- return {rule_id:specs[rule] for rule_id, rule in id_to_rule.items()}
87
 
88
 
89
 
@@ -138,12 +122,16 @@ class OdinsynthDatasetBuilder(datasets.GeneratorBasedBuilder):
138
  for row in reader:
139
  rule_id = int(row[0])
140
  if rule_id in specs:
 
 
 
141
  yield key, {
142
  "rule_id": rule_id,
143
  "parent": row[1],
144
  "child": row[2],
145
  "negative_child": row[3],
146
- "spec": specs[rule_id],
 
147
  "step": int(row[4]),
148
  "length": int(row[5]),
149
  }
 
16
 
17
  VERSION = datasets.Version("1.0.0")
18
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  def _info(self):
 
20
 
21
  features = datasets.Features(
22
  {
 
25
  "child": datasets.Value("string"),
26
  "negative_child": datasets.Value("string"),
27
  "spec": datasets.Sequence(datasets.Value("string")),
28
+ "matches": datasets.Sequence(datasets.Sequence(datasets.Value("int16"))),
29
  "step": datasets.Value("int8"),
30
  "length": datasets.Value("int8")
31
  }
 
49
 
50
  def _build_specs(self, path:str):
51
  id_to_rule = {}
52
+ specs = defaultdict(list)
53
+ matches = defaultdict(list)
54
  with open(path) as f:
55
  for l in tqdm(f, desc="Pre-computing specs"):
56
  try:
57
  instance = json.loads(l)
58
+ if instance['match']:
59
+ rule_id = int(instance['id'])
60
+ rule = instance['question']
61
+ sent = instance['context']
62
+ if sent not in specs[rule]:
63
+ specs[rule].append(sent)
64
+ matches[rule].append([instance['match_start'], instance['match_end']])
65
+ id_to_rule[rule_id] = rule
66
  except:
67
  # TODO log
68
  pass
69
 
70
+ return {rule_id:(specs[rule], matches[rule]) for rule_id, rule in id_to_rule.items()}
71
 
72
 
73
 
 
122
  for row in reader:
123
  rule_id = int(row[0])
124
  if rule_id in specs:
125
+ spec, matches = specs[rule_id]
126
+ assert len(spec) == len(matches), f"Rule id {id} has different number of sentences and matches"
127
+
128
  yield key, {
129
  "rule_id": rule_id,
130
  "parent": row[1],
131
  "child": row[2],
132
  "negative_child": row[3],
133
+ "spec": spec,
134
+ "matches": matches,
135
  "step": int(row[4]),
136
  "length": int(row[5]),
137
  }