holylovenia committed on
Commit
6c7133d
1 Parent(s): f1e7b0e

Upload vsolscsum.py with huggingface_hub

Files changed (1)
  1. vsolscsum.py +197 -0
vsolscsum.py ADDED
@@ -0,0 +1,197 @@
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@inproceedings{nguyen-etal-2016-vsolscsum,
    title = "{VS}o{LSCS}um: Building a {V}ietnamese Sentence-Comment Dataset for Social Context Summarization",
    author = "Nguyen, Minh-Tien and
      Lai, Dac Viet and
      Do, Phong-Khac and
      Tran, Duc-Vu and
      Nguyen, Minh-Le",
    editor = "Hasida, Koiti and
      Wong, Kam-Fai and
      Calzorari, Nicoletta and
      Choi, Key-Sun",
    booktitle = "Proceedings of the 12th Workshop on {A}sian Language Resources ({ALR}12)",
    month = dec,
    year = "2016",
    address = "Osaka, Japan",
    publisher = "The COLING 2016 Organizing Committee",
    url = "https://aclanthology.org/W16-5405",
    pages = "38--48",
}
"""

_DATASETNAME = "vsolscsum"

_DESCRIPTION = """
VSoLSCSum is a Vietnamese dataset for social context summarization. \
It contains 141 open-domain articles along with 3,760 sentences, \
2,448 extracted standard sentences and comments as standard summaries, \
and 6,926 comments across 12 events. The dataset was manually annotated by humans. \
Note that the extracted standard summaries also include comments. \
The label of a sentence or comment was generated based on voting among \
social annotators: given a sentence, each annotator makes a binary decision \
indicating whether the sentence is a summary candidate (YES) or not (NO). \
If three annotators vote YES, the sentence is labeled 3, so the label of \
each sentence or comment ranges from 1 to 5 \
(1: very poor, 2: poor, 3: fair, 4: good, 5: perfect). \
The standard summary sentences are those which receive at least three \
agreements from annotators. The inter-annotator agreement, calculated with \
Cohen's Kappa after validation among annotators, is 0.685.
"""

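# Illustrative sketch of the labeling scheme described above (not part of the
# original loader; the vote values are hypothetical): a sentence's label is the
# number of annotators, out of five, who voted YES, and sentences labeled >= 3
# form the standard summary.
#
#     votes = [1, 1, 0, 1, 0]   # binary YES/NO decisions from five annotators
#     label = sum(votes)        # -> 3 ("fair")
#     is_standard = label >= 3  # -> True
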
_HOMEPAGE = "https://github.com/nguyenlab/VSoLSCSum-Dataset"

_LANGUAGES = ["vie"]

_LICENSE = Licenses.CC_BY_4_0.value

_LOCAL = False

_URLS = {
    _DATASETNAME: "https://raw.githubusercontent.com/nguyenlab/VSoLSCSum-Dataset/master/VSoSLCSum.xml",
}

_SUPPORTED_TASKS = [Tasks.SUMMARIZATION]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"

class VSolSCSumDataset(datasets.GeneratorBasedBuilder):
    """
    The Vietnamese dataset for social context summarization includes 141 articles
    with a total of 3,760 sentences. It also contains 2,448 standard sentences
    extracted along with comments serving as standard summaries, and 6,926 comments
    across 12 events. Human annotators manually curated this dataset.
    Each sentence or comment received a label from 1 to 5 based on annotators'
    agreement (1: very poor, 2: poor, 3: fair, 4: good, 5: perfect). Standard
    summary sentences are those with at least three agreements. The inter-annotator
    agreement, measured by Cohen's Kappa, is 0.685.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_t2t",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_t2t",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:

        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "post_id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                    "document_and_comment": datasets.Value("string"),
                }
            )

        elif self.config.schema == "seacrowd_t2t":
            features = schemas.text2text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""

        data_path = Path(dl_manager.download_and_extract(_URLS[_DATASETNAME]))

        # A single XML file is downloaded; all posts are exposed under one train split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_path,
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""

        with open(filepath, "r", encoding="utf-8") as file:
            xml_content = file.read()

        root = ET.fromstring(xml_content)

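        # Expected layout of the downloaded XML, as implied by the queries in
        # extract_data_from_xml below (element names are taken from the code,
        # not from separate documentation):
        #
        #   <post id="...">
        #     <title>...</title>
        #     <summary><sentences><sentence><content>...</content></sentence>...</sentences></summary>
        #     <document><sentences><sentence><content>...</content></sentence>...</sentences></document>
        #     <comments><comment><sentences><sentence><content>...</content></sentence>...</sentences></comment>...</comments>
        #   </post>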
        def extract_data_from_xml(root):
            data = []

            for post in root.findall(".//post"):
                post_id = post.get("id")
                title = post.find("title").text
                summary_sentences = [sentence.find("content").text for sentence in post.find(".//summary").find("sentences").findall("sentence")]
                document_sentences = [sentence.find("content").text for sentence in post.find(".//document").find("sentences").findall("sentence")]
                # ElementTree's find() returns the first match, so only the first
                # <comment> element of each post contributes to comment_sentences.
                comment_sentences = [sentence.find("content").text for sentence in post.find(".//comments").find(".//comment").find("sentences").findall("sentence")]

                summary_text = " ".join(summary_sentences)
                document_text = " ".join(document_sentences)
                comment_text = " ".join(comment_sentences)

                data.append(
                    {
                        "post_id": post_id,
                        "title": title,
                        "summary": summary_text,
                        "document_and_comment": f"{document_text} | {comment_text}",
                    }
                )

            return data

        extracted_data = extract_data_from_xml(root)
        df = pd.DataFrame(extracted_data)

        for index, row in df.iterrows():

            if self.config.schema == "source":
                example = row.to_dict()

            elif self.config.schema == "seacrowd_t2t":
                example = {
                    "id": str(row["post_id"]),
                    "text_1": str(row["summary"]),
                    "text_2": str(row["document_and_comment"]),
                    "text_1_name": "summary",
                    "text_2_name": "document_and_comment",
                }

            yield index, example
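
A minimal usage sketch (not part of the commit): assuming this script is saved locally as vsolscsum.py and the seacrowd package is installed, the two configs defined in BUILDER_CONFIGS above can be loaded with the standard datasets API; trust_remote_code may be required on recent versions of datasets.

    import datasets

    # Source schema: post_id, title, summary, document_and_comment
    source_ds = datasets.load_dataset("vsolscsum.py", name="vsolscsum_source", split="train", trust_remote_code=True)

    # SEACrowd text-to-text schema: id, text_1, text_2, text_1_name, text_2_name
    t2t_ds = datasets.load_dataset("vsolscsum.py", name="vsolscsum_seacrowd_t2t", split="train", trust_remote_code=True)

    print(source_ds[0]["title"])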