holylovenia committed on
Commit
0ecc70b
1 Parent(s): ebcd14f

Upload codeswitch_reddit.py with huggingface_hub

Files changed (1)
  1. codeswitch_reddit.py +209 -0
codeswitch_reddit.py ADDED
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import html
import os
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """
@inproceedings{rabinovich-2019-codeswitchreddit,
    author = {Rabinovich, Ella and Sultani, Masih and Stevenson, Suzanne},
    title = {CodeSwitch-Reddit: Exploration of Written Multilingual Discourse in Online Discussion Forums},
    booktitle = {Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)},
    publisher = {Association for Computational Linguistics},
    year = {2019},
    url = {https://aclanthology.org/D19-1484},
    doi = {10.18653/v1/D19-1484},
    pages = {4776--4786},
}
"""

_LOCAL = False
_LANGUAGES = ["eng", "ind", "tgl"]
_DATASETNAME = "codeswitch_reddit"
_DESCRIPTION = """
This corpus consists of monolingual English and multilingual (English and one other language) posts
from country-specific subreddits, including r/indonesia, r/philippines and r/singapore for Southeast Asia.
Posts were manually classified according to whether or not they contain code-switching.
"""

_HOMEPAGE = "https://github.com/ellarabi/CodeSwitch-Reddit"
_LICENSE = Licenses.UNKNOWN.value
_URL = "http://www.cs.toronto.edu/~ella/code-switch.reddit.tar.gz"

_SUPPORTED_TASKS = [Tasks.CODE_SWITCHING_IDENTIFICATION, Tasks.SELF_SUPERVISED_PRETRAINING]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"


class CodeSwitchRedditDataset(datasets.GeneratorBasedBuilder):
    """Dataset of monolingual English and multilingual comments from country-specific subreddits."""

    SUBSETS = ["cs", "eng_monolingual"]
    INCLUDED_SUBREDDITS = ["indonesia", "Philippines", "singapore"]
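    # Maps the corpus' language names to the ISO 639-3 codes listed in _LANGUAGES.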
    INCLUDED_LANGUAGES = {"English": "eng", "Indonesian": "ind", "Tagalog": "tgl"}

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{subset}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} source schema for {subset} subset",
            schema="source",
            subset_id=f"{_DATASETNAME}_{subset}",
        )
        for subset in SUBSETS
    ] + [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_eng_monolingual_seacrowd_ssp",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} SEACrowd ssp schema for eng_monolingual subset",
            schema="seacrowd_ssp",
            subset_id=f"{_DATASETNAME}_eng_monolingual",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_cs_seacrowd_text_multi",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} SEACrowd text multilabel schema for cs subset",
            schema="seacrowd_text_multi",
            subset_id=f"{_DATASETNAME}_cs",
        ),
    ]
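    # The configs above expose four names: codeswitch_reddit_cs_source, codeswitch_reddit_eng_monolingual_source,
    # codeswitch_reddit_eng_monolingual_seacrowd_ssp, and codeswitch_reddit_cs_seacrowd_text_multi.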

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_cs_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            if "cs" in self.config.subset_id:
                features = datasets.Features(
                    {
                        "author": datasets.Value("string"),
                        "subreddit": datasets.Value("string"),
                        "country": datasets.Value("string"),
                        "date": datasets.Value("int32"),
                        "confidence": datasets.Value("int32"),
                        "lang1": datasets.Value("string"),
                        "lang2": datasets.Value("string"),
                        "text": datasets.Value("string"),
                        "id": datasets.Value("string"),
                        "link_id": datasets.Value("string"),
                        "parent_id": datasets.Value("string"),
                    }
                )
            elif "eng_monolingual" in self.config.subset_id:
                features = datasets.Features(
                    {
                        "author": datasets.Value("string"),
                        "subreddit": datasets.Value("string"),
                        "country": datasets.Value("string"),
                        "date": datasets.Value("int32"),
                        "confidence": datasets.Value("int32"),
                        "lang": datasets.Value("string"),
                        "text": datasets.Value("string"),
                    }
                )

        elif self.config.schema == "seacrowd_ssp":
            features = schemas.ssp_features
        elif self.config.schema == "seacrowd_text_multi":
            features = schemas.text_multi_features(label_names=list(self.INCLUDED_LANGUAGES.values()))

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        data_dir = dl_manager.download_and_extract(_URL)
        if "cs" in self.config.subset_id:
            filepath = os.path.join(data_dir, "cs_main_reddit_corpus.csv")
        elif "eng_monolingual" in self.config.subset_id:
            filepath = os.path.join(data_dir, "eng_monolingual_reddit_corpus.csv")

        # The corpus ships without predefined dev/test partitions, so everything is
        # exposed as a single train split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": filepath,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        df = pd.read_csv(filepath, index_col=None, header="infer", encoding="utf-8")
        df = df[df["Subreddit"].isin(self.INCLUDED_SUBREDDITS)]

        if self.config.subset_id.split("_")[-1] == "cs":
            df = df[(df["Lang1"].isin(self.INCLUDED_LANGUAGES)) & (df["Lang2"].isin(self.INCLUDED_LANGUAGES))]
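            # Note: `Series.isin` over the INCLUDED_LANGUAGES dict tests membership
            # against its keys ("English", "Indonesian", "Tagalog"), not the ISO-code values.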
            df.reset_index(drop=True, inplace=True)

            for index, row in df.iterrows():
                parsed_text = html.unescape(row["Text"])
                if self.config.schema == "source":
                    example = {
                        "author": row["Author"],
                        "subreddit": row["Subreddit"],
                        "country": row["Country"],
                        "date": row["Date"],
                        "confidence": row["confidence"],
                        "lang1": row["Lang1"],
                        "lang2": row["Lang2"],
                        "text": parsed_text,
                        "id": row["id"],
                        "link_id": row["link_id"],
                        "parent_id": row["parent_id"],
                    }

                elif self.config.schema == "seacrowd_text_multi":
                    lang_one, lang_two = self.INCLUDED_LANGUAGES[row["Lang1"]], self.INCLUDED_LANGUAGES[row["Lang2"]]
                    example = {
                        "id": str(index),
                        "text": parsed_text,
                        "labels": sorted([lang_one, lang_two]),  # Language order doesn't matter in original dataset; just arrange alphabetically for consistency
                    }
                yield index, example

        else:
            df.reset_index(drop=True, inplace=True)
            for index, row in df.iterrows():
                parsed_text = html.unescape(row["Text"])
                if self.config.schema == "source":
                    example = {
                        "author": row["Author"],
                        "subreddit": row["Subreddit"],
                        "country": row["Country"],
                        "date": row["Date"],
                        "confidence": row["confidence"],
                        "lang": row["Lang"],
                        "text": parsed_text,
                    }
                elif self.config.schema == "seacrowd_ssp":
                    example = {
                        "id": str(index),
                        "text": parsed_text,
                    }
                yield index, example
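
For a quick sanity check, here is a minimal sketch of how the uploaded script might be exercised locally. It assumes the `seacrowd` package the script imports is installed, the config name is one of the four built in BUILDER_CONFIGS, and `trust_remote_code=True` may be required depending on the installed `datasets` version:

import datasets

# Point this at wherever codeswitch_reddit.py is saved locally.
dataset = datasets.load_dataset(
    "codeswitch_reddit.py",
    name="codeswitch_reddit_cs_source",
    trust_remote_code=True,
)
print(dataset["train"][0])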