holylovenia committed (verified)
Commit c3d605f · 1 Parent(s): 459498f

Upload tcope.py with huggingface_hub

Files changed (1):
  1. tcope.py +163 -0
tcope.py ADDED
@@ -0,0 +1,163 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from pathlib import Path
+from typing import Dict, List, Tuple
+
+import datasets
+import pandas as pd
+
+from seacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import Licenses, Tasks
+
+_CITATION = """
+@article{gonzales_broadening_2023,
+    author = {Gonzales, Wilkinson Daniel Wong},
+    title = {Broadening horizons in the diachronic and sociolinguistic study of
+    Philippine Englishes with the Twitter Corpus of Philippine Englishes (TCOPE)},
+    journal = {English World-Wide},
+    year = {2023},
+    url = {https://osf.io/k3qzx},
+    doi = {10.17605/OSF.IO/3Q5PW},
+}
+"""
+
+_LOCAL = False
+_LANGUAGES = ["eng", "fil"]
+_DATASETNAME = "tcope"
+_DESCRIPTION = """
+The TCOPE dataset consists of public tweets (amounting to about 13.5 million words) collected from 13 major cities in the Philippines.
+Tweets are either purely in English or involve code-switching between English and Filipino.
+Tweets are tagged for part of speech and dependency relations using spaCy, and were collected from 2010 to 2021.
+The publicly available dataset is a random sample (10%) of the full TCOPE dataset, which consists of roughly 27 million tweets
+(amounting to about 135 million words) collected from 29 major cities over the same date range.
+"""
+
+_HOMEPAGE = "https://osf.io/3q5pw/wiki/home/"
+_LICENSE = Licenses.CC0_1_0.value
+_URL = "https://files.osf.io/v1/resources/3q5pw/providers/osfstorage/63737a5b0e715d3616a998f7"
+
+_SUPPORTED_TASKS = [Tasks.POS_TAGGING, Tasks.DEPENDENCY_PARSING]
+_SOURCE_VERSION = "1.0.0"
+_SEACROWD_VERSION = "2024.06.20"
+
+
+class TCOPEDataset(datasets.GeneratorBasedBuilder):
+    """TCOPE is a dataset of Philippine English tweets by Gonzales (2023)."""
+
+    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+    # The raw data contains invalid labels, likely due to coding errors,
+    # such as "BODY", "BIRTHDAY", "HAVAIANAS", etc. Only valid POS tags
+    # are included here and in the loaded data.
+    POS_LABELS = ["NOUN", "PUNCT", "PROPN", "VERB", "PRON", "ADP", "ADJ", "ADV", "DET", "AUX", "PART", "CCONJ", "INTJ", "SPACE", "SCONJ", "NUM", "X", "SYM"]
+
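+    # Two configurations are exposed: the raw "source" schema and the
+    # normalized SEACrowd sequence-labeling ("seq_label") schema.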
+    BUILDER_CONFIGS = [
+        SEACrowdConfig(
+            name=f"{_DATASETNAME}_source",
+            version=SOURCE_VERSION,
+            description=f"{_DATASETNAME} source schema",
+            schema="source",
+            subset_id=_DATASETNAME,
+        ),
+        SEACrowdConfig(
+            name=f"{_DATASETNAME}_seacrowd_seq_label",
+            version=SEACROWD_VERSION,
+            description=f"{_DATASETNAME} SEACrowd sequence labeling schema",
+            schema="seacrowd_seq_label",
+            subset_id=_DATASETNAME,
+        ),
+    ]
+
+    DEFAULT_CONFIG_NAME = "tcope_source"
+
+    def _info(self) -> datasets.DatasetInfo:
+        if self.config.schema == "source":
+            features = datasets.Features(
+                {
+                    "copeid": datasets.Value("string"),
+                    "userid": datasets.Value("int64"),
+                    "divided_tweet": datasets.Value("string"),
+                    "postag": datasets.Value("string"),
+                    "deptag": datasets.Value("string"),
+                    "citycode": datasets.Value("string"),
+                    "year": datasets.Value("int64"),
+                    "extendedcope": datasets.Value("string"),
+                }
+            )
+
+        elif self.config.schema == "seacrowd_seq_label":
+            features = schemas.seq_label_features(label_names=self.POS_LABELS)
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+        """Returns SplitGenerators."""
+        # The downloaded OSF archive is a ZIP that contains a second ZIP,
+        # which in turn holds the spreadsheet (CSV) data.
+        folder_zip_dir = dl_manager.download_and_extract(_URL)
+        spreadsheet_zip_dir = dl_manager.extract(f"{folder_zip_dir}/public_v1/spreadsheet_format.zip")
+        spreadsheet_fp = f"{spreadsheet_zip_dir}/spreadsheet_format/tcope_v1_public_sample.csv"
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "filepath": spreadsheet_fp,
+                    "split": "train",
+                },
+            ),
+        ]
+
+    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+        """Yields examples as (key, example) tuples."""
+        if self.config.schema not in ("source", "seacrowd_seq_label"):
+            raise ValueError(f"Received unexpected config schema {self.config.schema}")
+
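+        # The source CSV names the tweet-text column "divided.tweet"; rename it to a
+        # valid feature name and drop rows with no tweet text before yielding examples.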
+        df = pd.read_csv(filepath, index_col=None)
+        df = df.rename(columns={"divided.tweet": "divided_tweet"}).query("divided_tweet.notna()")
+
+        for index, row in df.iterrows():
+            if self.config.schema == "source":
+                example = row.to_dict()
+            elif self.config.schema == "seacrowd_seq_label":
+                tokens, tags = self.split_token_and_tag(row["postag"], valid_tags=self.POS_LABELS)
+                example = {
+                    "id": str(index),
+                    "tokens": tokens,
+                    "labels": tags,
+                }
+            yield index, example
+
+    def split_token_and_tag(self, tweet: str, valid_tags: List[str]) -> Tuple[List[str], List[str]]:
+        """Split tweet into two separate lists of tokens and tags."""
+        tokens_with_tags = tweet.split()
+        tokens = []
+        tags = []
+        for indiv_token_with_tag in tokens_with_tags:
+            token, tag = indiv_token_with_tag.rsplit("_", 1)
+            tokens.append(token)
+            if tag in valid_tags:
+                tags.append(tag)
+            else:  # Use "X"/other spaCy tag for invalid POS tags
+                tags.append("X")
+        return tokens, tags
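A minimal usage sketch (not part of the commit; assumes the seacrowd package is installed and this script is saved locally):

from datasets import load_dataset

# Load the SEACrowd sequence-labeling view defined by tcope.py. Recent versions
# of the `datasets` library require trust_remote_code=True for script-based datasets.
tcope = load_dataset("tcope.py", name="tcope_seacrowd_seq_label", split="train", trust_remote_code=True)
print(tcope[0]["tokens"][:5], tcope[0]["labels"][:5])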