antypasd commited on
Commit
b2c0ed2
1 Parent(s): 5a926e4

Create tweet_topic_multilingual.py

Browse files
Files changed (1) hide show
  1. tweet_topic_multilingual.py +173 -0
tweet_topic_multilingual.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ TweetTopicMultilingual Dataset """
2
+ import json
3
+ from typing import List
4
+
5
+ import datasets
6
+
7
+ logger = datasets.logging.get_logger(__name__)
8
+ _DESCRIPTION = """[TweetTopicMultilingual](TBA)"""
9
+ _VERSION = "0.0.91"
10
+ _CITATION = """TBA"""
11
+ _HOME_PAGE = "https://cardiffnlp.github.io"
12
+ _NAME = "tweet_topic_multilingual"
13
+ _ROOT_URL = f"https://huggingface.co/datasets/cardiffnlp/{_NAME}/resolve/main/dataset"
14
+ _LANGUAGES = ["en", "es", "ja", "gr"]
15
+ _CLASS_MAPPING = {
16
+ "en": [
17
+ "Arts & Culture",
18
+ "Business & Entrepreneurs",
19
+ "Celebrity & Pop Culture",
20
+ "Diaries & Daily Life",
21
+ "Family",
22
+ "Fashion & Style",
23
+ "Film, TV & Video",
24
+ "Fitness & Health",
25
+ "Food & Dining",
26
+ "Learning & Educational",
27
+ "News & Social Concern",
28
+ "Relationships",
29
+ "Science & Technology",
30
+ "Youth & Student Life",
31
+ "Music",
32
+ "Gaming",
33
+ "Sports",
34
+ "Travel & Adventure",
35
+ "Other Hobbies"
36
+ ],
37
+ "gr": [
38
+ "Τέχνες & Πολιτισμός",
39
+ "Επιχειρήσεις & Επιχειρηματίες",
40
+ "Διασημότητες & Ποπ κουλτούρα",
41
+ "Ημερολόγια & Καθημερινή ζωή",
42
+ "Οικογένεια",
43
+ "Μόδα & Στυλ",
44
+ "Ταινίες, τηλεόραση & βίντεο",
45
+ "Γυμναστική & Υεία",
46
+ "Φαγητό & Δείπνο",
47
+ "Μάθηση & Εκπαίδευση",
48
+ "Ειδήσεις & Κοινωνία",
49
+ "Σχέσεις",
50
+ "Επιστήμη & Τεχνολογία",
51
+ "Νεανική & Φοιτητική ζωή",
52
+ "Μουσική",
53
+ "Παιχνίδια",
54
+ "Αθλητισμός",
55
+ "Ταξίδια & Περιπέτεια",
56
+ "Άλλα χόμπι"
57
+ ],
58
+ "es": [
59
+ "Arte y cultura",
60
+ "Negocios y emprendedores",
61
+ "Celebridades y cultura pop",
62
+ "Diarios y vida diaria",
63
+ "Familia",
64
+ "Moda y estilo",
65
+ "Cine, televisión y video",
66
+ "Estado físico y salud",
67
+ "Comida y comedor",
68
+ "Aprendizaje y educación",
69
+ "Noticias e interés social",
70
+ "Relaciones",
71
+ "Ciencia y Tecnología",
72
+ "Juventud y Vida Estudiantil",
73
+ "Música",
74
+ "Juegos",
75
+ "Deportes",
76
+ "Viajes y aventuras",
77
+ "Otros pasatiempos"
78
+ ],
79
+ "ja": [
80
+ "アート&カルチャー",
81
+ "ビジネス",
82
+ "芸能",
83
+ "日常",
84
+ "家族",
85
+ "ファッション",
86
+ "映画&ラジオ",
87
+ "フィットネス&健康",
88
+ "料理",
89
+ "教育関連",
90
+ "社会",
91
+ "人間関係",
92
+ "サイエンス",
93
+ "学校",
94
+ "音楽",
95
+ "ゲーム",
96
+ "スポーツ",
97
+ "旅行",
98
+ "その他"
99
+ ]
100
+ }
101
+
102
# Download locations for every builder configuration, keyed by config name.
# Each value maps a split name to the list of jsonl files composing it.
_URL = {}

# One config per language with its own train/test/validation files.
for _language in _LANGUAGES:
    _per_split = {}
    for _split in ["train", "test", "validation"]:
        _per_split[_split] = [f"{_ROOT_URL}/{_language}/{_language}_{_split}.jsonl"]
    _URL[_language] = _per_split

# Extra English data collected in 2022 (train/validation only).
_URL["en_2022"] = {
    _split: [f"{_ROOT_URL}/en_2022/{_split}.jsonl"] for _split in ["train", "validation"]
}

# All languages pooled together (train/validation only).
_URL["mix"] = {
    _split: [f"{_ROOT_URL}/{lan}/{lan}_{_split}.jsonl" for lan in _LANGUAGES]
    for _split in ["train", "validation"]
}

# Pooled languages plus the extra 2022 English files.
_URL["mix_2022"] = {
    _split: [f"{_ROOT_URL}/{lan}/{lan}_{_split}.jsonl" for lan in _LANGUAGES]
    + [f"{_ROOT_URL}/en_2022/{_split}.jsonl"]
    for _split in ["train", "validation"]
}

# Five cross-validation folds per language, each with its own three splits.
for _language in _LANGUAGES:
    for _fold in range(5):
        _URL[f"{_language}_cross_validation_{_fold}"] = {
            _split: [f"{_ROOT_URL}/{_language}/cross_validation/{_language}_{_split}_{_fold}.jsonl"]
            for _split in ["train", "test", "validation"]
        }
122
+
123
+
124
class Config(datasets.BuilderConfig):
    """Builder configuration for TweetTopicMultilingual.

    Adds no fields of its own; every keyword argument is forwarded
    unchanged to :class:`datasets.BuilderConfig`.
    """

    def __init__(self, **kwargs):
        """Create a config.

        Args:
            **kwargs: keyword arguments forwarded to the parent class.
        """
        super().__init__(**kwargs)
134
+
135
+
136
class TweetTopicMultilingual(datasets.GeneratorBasedBuilder):
    """Builder for the multilingual TweetTopic topic-classification dataset."""

    # One configuration per entry in _URL: single languages, pooled mixes,
    # the 2022 English extension, and per-language cross-validation folds.
    BUILDER_CONFIGS = [
        Config(name=config_name, version=datasets.Version(_VERSION), description=_DESCRIPTION)
        for config_name in _URL
    ]

    def _split_generators(self, dl_manager):
        """Download this config's jsonl files and declare its splits."""
        url_map = _URL[self.config.name]
        local_files = dl_manager.download_and_extract(url_map)
        generators = []
        for split_name in url_map:
            generators.append(
                datasets.SplitGenerator(
                    name=split_name,
                    gen_kwargs={"filepath": local_files[split_name]},
                )
            )
        return generators

    def _generate_examples(self, filepath: List[str]):
        """Yield ``(key, example)`` pairs from one or more jsonl files.

        Keys are consecutive integers running across all files of the split.
        """
        key = 0
        for path in filepath:
            logger.info("generating examples from = %s", path)
            with open(path, encoding="utf-8") as reader:
                content = reader.read()
            # Each non-empty line is a single json-encoded example.
            for line in content.split("\n"):
                if len(line) > 0:
                    yield key, json.loads(line)
                    key += 1

    def _info(self):
        """Declare the feature schema shared by all configurations."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "label_name_flatten": datasets.Value("string"),
                    # Multi-label targets; class names are the English topic list.
                    "label": datasets.Sequence(datasets.features.ClassLabel(names=_CLASS_MAPPING["en"])),
                    "label_name": datasets.Sequence(datasets.Value("string"))
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            citation=_CITATION,
        )