holylovenia committed on
Commit
d104ebb
1 Parent(s): 56da8ed

Upload newsph.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. newsph.py +109 -0
newsph.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from pathlib import Path
3
+ from typing import Dict, List, Tuple
4
+
5
+ import datasets
6
+
7
+ from seacrowd.utils import schemas
8
+ from seacrowd.utils.configs import SEACrowdConfig
9
+ from seacrowd.utils.constants import Licenses, Tasks
10
+
11
# BibTeX citation for the source paper (Cruz et al., PRICAI 2021).
_CITATION = """\
@inproceedings{cruz2021exploiting,
title={Exploiting news article structure for automatic corpus generation of entailment datasets},
author={Cruz, Jan Christian Blaise and Resabal, Jose Kristian and Lin, James and Velasco, Dan John and Cheng, Charibeth},
booktitle={PRICAI 2021: Trends in Artificial Intelligence: 18th Pacific Rim International Conference on Artificial Intelligence, PRICAI 2021, Hanoi, Vietnam, November 8--12, 2021, Proceedings, Part II 18},
pages={86--99},
year={2021},
organization={Springer}
}
"""
# Canonical dataset identifier; config names below are derived from it.
_DATASETNAME = "newsph"
# Language codes covered by the corpus — presumably ISO 639-3 (fil = Filipino, tgl = Tagalog); verify against SEACrowd conventions.
_LANGUAGES = ["fil", "tgl"]
# Human-readable summary surfaced through DatasetInfo.
_DESCRIPTION = """\
Raw collection of news articles in Filipino which can be used for language modelling.
"""
# Upstream dataset homepage.
_HOMEPAGE = "https://huggingface.co/datasets/newsph"
# License value taken from the SEACrowd Licenses enum (GPL 3.0).
_LICENSE = Licenses.GPL_3_0.value
# False: data is fetched from a public URL, not a local-only dataset.
_LOCAL = False
# Single zip archive containing the raw text split(s).
_URLS = "https://s3.us-east-2.amazonaws.com/blaisecruz.com/datasets/newsph/newsph.zip"
# This loader only supports self-supervised pretraining (language modelling).
_SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]
# Version of the underlying source data.
_SOURCE_VERSION = "1.0.0"

# Version of the SEACrowd release this loader targets.
_SEACROWD_VERSION = "2024.06.20"
34
+
35
+
36
class NewsPhDataset(datasets.GeneratorBasedBuilder):
    """
    Raw collection of news articles in Filipino which can be used for language modelling.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    # One config per schema: the raw "source" view and the SEACrowd ssp view.
    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="newsph_source",
            version=SOURCE_VERSION,
            description="newsph source schema",
            schema="source",
            subset_id="newsph",
        ),
        SEACrowdConfig(
            name="newsph_seacrowd_ssp",
            version=SEACROWD_VERSION,
            description="newsph SEACrowd schema",
            schema="seacrowd_ssp",
            subset_id="newsph",
        ),
    ]

    DEFAULT_CONFIG_NAME = "newsph_source"

    def _info(self) -> datasets.DatasetInfo:
        """Build the DatasetInfo whose features match the active config's schema."""
        schema = self.config.schema
        if schema == "source":
            # Raw view: one id/text pair per line of the corpus.
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            )
        elif schema == "seacrowd_ssp":
            # Shared SEACrowd self-supervised-pretraining feature spec.
            features = schemas.self_supervised_pretraining.features
        else:
            raise NotImplementedError(f"Schema '{self.config.schema}' is not defined.")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download and extract the archive; only a train split is exposed."""
        extracted_dir = dl_manager.download_and_extract(_URLS)
        train_path = os.path.join(extracted_dir, "newsph", "train.txt")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_path, "split": "train"},
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples.

        Each line of the file becomes one example; blank lines are emitted
        with an empty text field rather than skipped.
        """
        if self.config.schema not in ("source", "seacrowd_ssp"):
            raise NotImplementedError
        with open(filepath, encoding="utf-8") as fh:
            for line_no, line in enumerate(fh):
                content = line if line.strip() else ""
                yield line_no, {"id": str(line_no), "text": content}