arxyzan committed on
Commit
0213f44
1 Parent(s): e1b0ae5

Create xlsum_fa.py

Files changed (1)
  1. xlsum_fa.py +102 -0
xlsum_fa.py ADDED
@@ -0,0 +1,102 @@
+ import csv
+
+ import datasets
+ from datasets.tasks import Summarization
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _CITATION = """\
+ @inproceedings{hasan-etal-2021-xl,
+     title = "{XL}-Sum: Large-Scale Multilingual Abstractive Summarization for 44 Languages",
+     author = "Hasan, Tahmid and
+       Bhattacharjee, Abhik and
+       Islam, Md. Saiful and
+       Mubasshir, Kazi and
+       Li, Yuan-Fang and
+       Kang, Yong-Bin and
+       Rahman, M. Sohel and
+       Shahriyar, Rifat",
+     booktitle = "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021",
+     month = aug,
+     year = "2021",
+     address = "Online",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/2021.findings-acl.413",
+     pages = "4693--4703",
+ }
+ """
+
+ _DESCRIPTION = """Persian portion of the XLSum Dataset"""
+
+ _DOWNLOAD_URLS = {
+     "train": "https://huggingface.co/datasets/hezarai/xlsum-fa/resolve/main/xlsum-fa_train.csv",
+     "test": "https://huggingface.co/datasets/hezarai/xlsum-fa/resolve/main/xlsum-fa_test.csv",
+ }
+
+
+ class XLSumFaConfig(datasets.BuilderConfig):
+     def __init__(self, **kwargs):
+         super(XLSumFaConfig, self).__init__(**kwargs)
+
+
+ class XLSumFa(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [
+         XLSumFaConfig(
+             name="xlsum-fa",
+             version=datasets.Version("1.0.0"),
+             description=_DESCRIPTION,
+         ),
+     ]
+
+     def _info(self):
+         text_column = "text"
+         summary_column = "summary"
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {text_column: datasets.Value("string"),
+                  summary_column: datasets.Value("string")}
+             ),
+             homepage="https://huggingface.co/datasets/hezarai/xlsum-fa",
+             citation=_CITATION,
+             task_templates=[Summarization(text_column=text_column, summary_column=summary_column)],
+         )
+
+     def _split_generators(self, dl_manager):
+         """
+         Download the train and test CSV files and return their SplitGenerators.
+         """
+         train_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["train"])
+         test_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["test"])
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}
+             ),
+         ]
+
+     def _generate_examples(self, filepath):
+         """
+         Read the CSV file at `filepath` and iterate over its rows.
+         For each row, yield a tuple of (id, {"text": ..., "summary": ...}),
+         for example:
+         ```
+         (123, {"text": "...", "summary": "..."})
+         ```
+         """
+         logger.info("⏳ Generating examples from = %s", filepath)
+         with open(filepath, encoding="utf-8") as csv_file:
+             csv_reader = csv.reader(
+                 csv_file, quotechar='"', skipinitialspace=True
+             )
+             # Skip the header row
+             next(csv_reader, None)
+
+             for id_, row in enumerate(csv_reader):
+                 text, summary = row
+                 yield id_, {"text": text, "summary": summary}
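
For reference, a minimal usage sketch (assuming the script is consumed through the standard `datasets.load_dataset` API and that the `hezarai/xlsum-fa` repository exposes this loader):

```python
from datasets import load_dataset

# Load both splits defined by xlsum_fa.py ("train" and "test").
dataset = load_dataset("hezarai/xlsum-fa")

sample = dataset["train"][0]
print(sample["text"])     # full Persian article
print(sample["summary"])  # reference summary
```

Each example carries only the two string features declared in `_info`, so downstream code can pass the `text`/`summary` pairs straight to a summarization training pipeline.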