khalidalt committed
Commit
7f1af59
1 Parent(s): 0883cdc

Update tydiqa-primary.py

Files changed (1): tydiqa-primary.py (+78 -49)

tydiqa-primary.py
@@ -47,49 +47,43 @@ class tydiqa_Primary(datasets.GeneratorBasedBuilder):
         # TODO(tydiqa): Specifies the datasets.DatasetInfo object
 
         return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    "passage_answer_candidates": datasets.features.Sequence(
-                        {
-                            "plaintext_start_byte": datasets.Value("int32"),
-                            "plaintext_end_byte": datasets.Value("int32"),
-                        }
-                    ),
-                    "question_text": datasets.Value("string"),
-                    "document_title": datasets.Value("string"),
-                    "language": datasets.Value("string"),
-                    "annotations": datasets.features.Sequence(
-                        {
-                            #'annotation_id': datasets.Value('int32'),
-                            'minimal_answer': datasets.features.Sequence(
-                                {
-                                    'plaintext_start_byte':datasets.Value("int32"),
-                                    'plaintext_end_byte':datasets.Value("int32"),
-
-
-                                }
-                            ),
-
-                            "yes_no_answer": datasets.Value("string"),
-                        }
-                    ),
-                    "document_plaintext": datasets.Value("string"),
-                    # 'example_id': datasets.Value('variant'),
-                    "document_url": datasets.Value("string")
-                    # These are the features of your dataset like images, labels ...
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="https://github.com/google-research-datasets/tydiqa",
-            citation=_CITATION,
-        )
+            # This is the description that will appear on the datasets page.
+            description=_DESCRIPTION,
+            # datasets.features.FeatureConnectors
+            features=datasets.Features(
+                {
+                    "passage_answer_candidates": datasets.features.Sequence(
+                        {
+                            "plaintext_start_byte": datasets.Value("int32"),
+                            "plaintext_end_byte": datasets.Value("int32"),
+                        }
+                    ),
+                    "question_text": datasets.Value("string"),
+                    "document_title": datasets.Value("string"),
+                    "language": datasets.Value("string"),
+                    "annotations": datasets.features.Sequence(
+                        {
+                            # 'annotation_id': datasets.Value('variant'),
+                            "passage_answer_candidate_index": datasets.Value("int32"),
+                            "minimal_answers_start_byte": datasets.Value("int32"),
+                            "minimal_answers_end_byte": datasets.Value("int32"),
+                            "yes_no_answer": datasets.Value("string"),
+                        }
+                    ),
+                    "document_plaintext": datasets.Value("string"),
+                    # 'example_id': datasets.Value('variant'),
+                    "document_url": datasets.Value("string")
+                    # These are the features of your dataset like images, labels ...
+                }
+            ),
+            # If there's a common (input, target) tuple from the features,
+            # specify them here. They'll be used if as_supervised=True in
+            # builder.as_dataset.
+            supervised_keys=None,
+            # Homepage of the dataset for documentation
+            homepage="https://github.com/google-research-datasets/tydiqa",
+            citation=_CITATION,
+        )
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
@@ -117,10 +111,45 @@ class tydiqa_Primary(datasets.GeneratorBasedBuilder):
         # TODO(tydiqa): Yields (key, example) tuples from the dataset
 
         with open(filepath, encoding="utf-8") as f:
-            print("So far so good")
-            for _id,row in enumerate(f):
-
+            for id_, row in enumerate(f):
                 data = json.loads(row)
-
-
-                yield _id, data
+                passages = data["passage_answer_candidates"]
+                end_byte = [passage["plaintext_end_byte"] for passage in passages]
+                start_byte = [passage["plaintext_start_byte"] for passage in passages]
+                title = data["document_title"]
+                lang = data["language"]
+                question = data["question_text"]
+                annotations = data["annotations"]
+                # annot_ids = [annotation["annotation_id"] for annotation in annotations]
+                yes_no_answers = [annotation["yes_no_answer"] for annotation in annotations]
+                min_answers_end_byte = [
+                    annotation["minimal_answer"]["plaintext_end_byte"] for annotation in annotations
+                ]
+                min_answers_start_byte = [
+                    annotation["minimal_answer"]["plaintext_start_byte"] for annotation in annotations
+                ]
+                passage_cand_answers = [
+                    annotation["passage_answer"]["candidate_index"] for annotation in annotations
+                ]
+                doc = data["document_plaintext"]
+                # example_id = data["example_id"]
+                url = data["document_url"]
+                yield id_, {
+                    "passage_answer_candidates": {
+                        "plaintext_start_byte": start_byte,
+                        "plaintext_end_byte": end_byte,
+                    },
+                    "question_text": question,
+                    "document_title": title,
+                    "language": lang,
+                    "annotations": {
+                        # 'annotation_id': annot_ids,
+                        "passage_answer_candidate_index": passage_cand_answers,
+                        "minimal_answers_start_byte": min_answers_start_byte,
+                        "minimal_answers_end_byte": min_answers_end_byte,
+                        "yes_no_answer": yes_no_answers,
+                    },
+                    "document_plaintext": doc,
+                    # 'example_id': example_id,
+                    "document_url": url,
+                }
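A hedged usage sketch of the updated loader follows. In the TyDi QA primary-task data, start/end offsets are byte positions into the UTF-8 encoding of `document_plaintext`, and a start of -1 marks an annotation with no minimal answer; the local script path and split name below are assumptions for illustration, not confirmed by this diff.

    from datasets import load_dataset

    # Assumed local invocation of this loading script; path and split are illustrative.
    ds = load_dataset("./tydiqa-primary.py", split="train")

    example = ds[0]
    doc_bytes = example["document_plaintext"].encode("utf-8")  # offsets are byte-based
    ann = example["annotations"]
    for start, end in zip(ann["minimal_answers_start_byte"], ann["minimal_answers_end_byte"]):
        if start != -1:  # -1: this annotation has no minimal answer span
            print(doc_bytes[start:end].decode("utf-8", errors="replace"))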