leondz committed
Commit 6dda140
1 parent: 930af52

add reader for DAST

Files changed (3)
  1. dast.jsonl +0 -0
  2. dataset_infos.json +1 -0
  3. dkstance.py +199 -0
dast.jsonl CHANGED
The diff for this file is too large to render. See raw diff
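The raw diff is not rendered here, but the reader in dkstance.py below implies the shape of the file: one JSON object per line, one line per story. A sketch of a single record follows, with field names taken from the reader and all values purely illustrative:

    # Hypothetical record shape, inferred from dkstance.py; values are placeholders.
    {
        "redditSubmission": {
            "submission_id": "8sjevz",   # Reddit id; also used to assign splits
            "title": "...",              # becomes the source text
            "RumourDescription": "...",
            "SourceSDQC": "...",
            "branches": [                # nested lists of reply chains
                [
                    {
                        "comment": {
                            "comment_id": "...",
                            "text": "...",
                            "parent_id": "...",
                            "SDQC_Parent": "Commenting",   # Supporting/Denying/Querying/Commenting
                            "submission_id": "8sjevz",
                            "SDQC_Submission": "Commenting",
                        }
                    }
                ]
            ],
        }
    }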
 
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"dkstance": {"description": "This dataset presents a series of stories on Reddit and the conversation around\nthem, annotated for stance. Stories are also annotated for veracity.\n\nFor more details see https://aclanthology.org/W19-6122/\n", "citation": "@inproceedings{lillie-etal-2019-joint,\n title = \"Joint Rumour Stance and Veracity Prediction\",\n author = \"Lillie, Anders Edelbo and\n Middelboe, Emil Refsgaard and\n Derczynski, Leon\",\n booktitle = \"Proceedings of the 22nd Nordic Conference on Computational Linguistics\",\n month = sep # \"{--}\" # oct,\n year = \"2019\",\n address = \"Turku, Finland\",\n publisher = {Link{\"o}ping University Electronic Press},\n url = \"https://aclanthology.org/W19-6122\",\n pages = \"208--221\",\n}\n", "homepage": "https://aclanthology.org/W19-6122/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "native_id": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "parent_id": {"dtype": "string", "id": null, "_type": "Value"}, "parent_text": {"dtype": "string", "id": null, "_type": "Value"}, "parent_stance": {"num_classes": 4, "names": ["Supporting", "Denying", "Querying", "Commenting"], "id": null, "_type": "ClassLabel"}, "source_id": {"dtype": "string", "id": null, "_type": "Value"}, "source_text": {"dtype": "string", "id": null, "_type": "Value"}, "source_stance": {"num_classes": 4, "names": ["Supporting", "Denying", "Querying", "Commenting"], "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "dast", "config_name": "dkstance", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2446983, "num_examples": 3122, "dataset_name": "dast"}, "validation": {"name": "validation", "num_bytes": 694731, "num_examples": 1066, "dataset_name": "dast"}, "test": {"name": "test", "num_bytes": 730762, "num_examples": 1060, "dataset_name": "dast"}}, "download_checksums": {"dast.jsonl": {"num_bytes": 4944186, "checksum": "a397a77469fc7fe11c7621f2e619f928a28ece934bd6c11b179b58154a70b450"}}, "download_size": 4944186, "post_processing_size": null, "dataset_size": 3872476, "size_in_bytes": 8816662}}
dkstance.py ADDED
@@ -0,0 +1,199 @@
+ # coding=utf-8
+ # Copyright 2022 Leon Derczynski, HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """Danish Stance Dataset DAST"""
+
+ from collections import defaultdict
+ import json
+
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _CITATION = """\
+ @inproceedings{lillie-etal-2019-joint,
+     title = "Joint Rumour Stance and Veracity Prediction",
+     author = "Lillie, Anders Edelbo and
+       Middelboe, Emil Refsgaard and
+       Derczynski, Leon",
+     booktitle = "Proceedings of the 22nd Nordic Conference on Computational Linguistics",
+     month = sep # "{--}" # oct,
+     year = "2019",
+     address = "Turku, Finland",
+     publisher = {Link{\"o}ping University Electronic Press},
+     url = "https://aclanthology.org/W19-6122",
+     pages = "208--221",
+ }
+ """
+
+ _DESCRIPTION = """\
+ This dataset presents a series of stories on Reddit and the conversation around
+ them, annotated for stance. Stories are also annotated for veracity.
+
+ For more details see https://aclanthology.org/W19-6122/
+ """
+
+ _URL = "dast.jsonl"
+
+
+ class DastConfig(datasets.BuilderConfig):
+     """BuilderConfig for DAST"""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for DAST.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(DastConfig, self).__init__(**kwargs)
+
+
+ class Dast(datasets.GeneratorBasedBuilder):
+     """Dast dataset."""
+
+     BUILDER_CONFIGS = [
+         DastConfig(name="dkstance", version=datasets.Version("1.0.0"), description="Danish Stance"),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "native_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "parent_id": datasets.Value("string"),
+                     "parent_text": datasets.Value("string"),
+                     "parent_stance": datasets.features.ClassLabel(
+                         names=[
+                             "Supporting",
+                             "Denying",
+                             "Querying",
+                             "Commenting",
+                         ]
+                     ),
+                     "source_id": datasets.Value("string"),
+                     "source_text": datasets.Value("string"),
+                     "source_stance": datasets.features.ClassLabel(
+                         names=[
+                             "Supporting",
+                             "Denying",
+                             "Querying",
+                             "Commenting",
+                         ]
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://aclanthology.org/W19-6122/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         downloaded_file = dl_manager.download_and_extract(_URL)
+         data_files = {
+             "dast": downloaded_file,
+         }
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["dast"], "split": "train"}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dast"], "split": "validation"}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["dast"], "split": "test"}),
+         ]
+
+     def unpack(self, entry, parent_id=None, source_id=None):
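+         # A dict holds a single comment; a list is a branch of nested replies.
+         # Recurse depth-first, yielding one instance per comment.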
+         if isinstance(entry, dict):
+             e = entry['comment']
+             original_id = e['comment_id']
+             text = e['text']
+             parent_id = e['parent_id']
+             parent_stance = e['SDQC_Parent']
+             source_id = e['submission_id']
+             source_stance = e['SDQC_Submission']
+
+             self.texts[original_id] = text
+
+             instance = {
+                 "id": str(self.guid),
+                 "native_id": original_id,
+                 "text": text,
+                 "parent_id": parent_id,
+                 "parent_text": self.texts[parent_id],
+                 "parent_stance": parent_stance,
+                 "source_id": source_id,
+                 "source_text": self.texts[source_id],
+                 "source_stance": source_stance,
+             }
+
+             self.id_mapper[e['comment_id']] = self.guid
+             self.guid += 1
+             yield instance
+
+         elif isinstance(entry, list):
+             for sub_entry in entry:
+                 yield from self.unpack(sub_entry, parent_id=parent_id, source_id=source_id)
+
+     def process_block(self, block):
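+         # Each line of dast.jsonl is one story: a Reddit submission plus its comment tree.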
+         j = json.loads(block)
+         s = j['redditSubmission']
+         source_id = s['submission_id']
+         self.id_mapper[source_id] = self.guid
+         self.guid += 1
+         self.texts[source_id] = s['title']
+
+         yield from self.unpack(j['branches'], source_id=0, parent_id=0)
+
+     def _generate_examples(self, filepath, split):
+         logger.info("⏳ Generating %s examples from %s", split, filepath)
+
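+         # defaultdict factory: parent or source ids never seen resolve to "[deleted]".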
+         def _deleted():
+             return "[deleted]"
+
+         self.guid = 0
+         self.id_mapper = {}
+         self.texts = defaultdict(_deleted)
+
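+         # The split partition is defined by hard-coded Reddit submission ids
+         # (13 train / 8 validation / 12 test source stories).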
+         partition_sources = ()
+         if split == 'train':
+             partition_sources = ('8sjevz', 'a0954m', 'a1gsmt', 'a2fpjr', 'a6o3us', 'ax70y5', 'axnshu', 'b23eat', 'b2xrgd', 'b72gok', 'b7aybw', 'b7ohqt', 'bb9iqt')
+         elif split == 'validation':
+             partition_sources = ('6v1ivh', '76y6rb', '7r9ouo', '8192oe', '83l9nm', '8agt1s', '8clb74', '8k6lcb')
+         elif split == 'test':
+             partition_sources = ('3qc12m', '3ud5z9', '53u5j7', '5emjyw', '5pfq1r', '5t1h6y', '60il0b', '67c2zf', '6jqtkm', '6nz7dy', '6szxwj', '6tm5kp')
+
+         with open(filepath, 'r', encoding="utf-8") as dastfile:
+             for line in dastfile:
+                 instances = self.process_block(line.strip())
+                 for instance in instances:
+                     if instance['source_id'] in partition_sources:
+                         yield instance['id'], instance
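Once the reader is in place, stance labels round-trip through the ClassLabel feature defined in _info. A usage sketch, under the same loading assumptions as above:

    from datasets import load_dataset

    train = load_dataset("dkstance.py", "dkstance", split="train")
    names = train.features["parent_stance"].names   # ["Supporting", "Denying", "Querying", "Commenting"]
    example = train[0]
    print(example["text"][:80], "->", names[example["parent_stance"]])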