init

- training_scripts/script.sh (+5 -5)
- tweet_topic_multilingual.py (+14 -8)
training_scripts/script.sh
@@ -1,11 +1,11 @@
 
 # TweetTopic
-python finetune_t5.py --dataset-name en_2022 --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022 --model-organization cardiffnlp
+python finetune_t5.py --dataset-name en_2022 --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022 --model-organization cardiffnlp --use-auth-token
 
-python finetune_t5.py --dataset-name es --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-es --model-organization cardiffnlp
-python finetune_t5.py --dataset-name en --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en --model-organization cardiffnlp
+python finetune_t5.py --dataset-name es --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-es --model-organization cardiffnlp --use-auth-token
+python finetune_t5.py --dataset-name en --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en --model-organization cardiffnlp --use-auth-token
 
-python finetune_t5.py --dataset-name ja --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-ja --model-organization cardiffnlp
-python finetune_t5.py --dataset-name gr --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-gr --model-organization cardiffnlp
+python finetune_t5.py --dataset-name ja --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-ja --model-organization cardiffnlp --use-auth-token
+python finetune_t5.py --dataset-name gr --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-gr --model-organization cardiffnlp --use-auth-token
 
 
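The only change here is appending --use-auth-token to each run so the script can authenticate against the Hugging Face Hub when pushing models to the cardiffnlp organization. Since finetune_t5.py is not part of this commit, the following is only a minimal sketch of how such a flag is commonly wired up, assuming argparse and the huggingface_hub client; the wiring is illustrative, not the repository's actual code:

# Sketch only: finetune_t5.py is not shown in this commit, so this
# argparse/huggingface_hub wiring is an assumption, not the real script.
import argparse
from huggingface_hub import HfFolder

parser = argparse.ArgumentParser()
parser.add_argument("--use-auth-token", action="store_true",
                    help="authenticate with the Hugging Face Hub")
args = parser.parse_args()

if args.use_auth_token:
    # Reuse the token cached locally by `huggingface-cli login`.
    token = HfFolder.get_token()

In older transformers releases such a token is then passed along as push_to_hub(..., use_auth_token=token); newer releases accept token= instead.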
tweet_topic_multilingual.py
@@ -1,10 +1,12 @@
 """ TweetTopicMultilingual Dataset """
 import json
+from typing import List, Union
+
 import datasets
 
 logger = datasets.logging.get_logger(__name__)
 _DESCRIPTION = """[TweetTopicMultilingual](TBA)"""
-_VERSION = "0.0.
+_VERSION = "0.0.5"
 _CITATION = """TBA"""
 _HOME_PAGE = "https://cardiffnlp.github.io"
 _NAME = "tweet_topic_multilingual"
@@ -102,6 +104,7 @@ _URL = {}
 for lan in _LANGUAGES:
     _URL[lan] = {split: f"{_ROOT_URL}/{lan}/{lan}_{split}.jsonl" for split in ["train", "test", "validation"]}
 _URL["en_2022"] = {split: f"{_ROOT_URL}/en_2022/{split}.jsonl" for split in ["train", "validation"]}
+_URL["mix"] = {split: [f"{_ROOT_URL}/{lan}/{split}.jsonl" for lan in _LANGUAGES] for split in ["train", "validation"]}
 # cross validation
 for lan in _LANGUAGES:
     _URL.update({
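The new "mix" entry maps each split to a list of per-language files rather than a single URL, which is what forces the _generate_examples change in the next hunk. As a sketch of what it evaluates to, assuming _LANGUAGES covers the four languages targeted by script.sh (the real _ROOT_URL and _LANGUAGES are defined earlier in the file, outside this diff):

# Illustration with placeholder values; _ROOT_URL and _LANGUAGES here are
# assumptions, defined for real elsewhere in the loading script.
_ROOT_URL = "https://example.com/dataset"
_LANGUAGES = ["en", "es", "ja", "gr"]

_URL_mix = {split: [f"{_ROOT_URL}/{lan}/{split}.jsonl" for lan in _LANGUAGES]
            for split in ["train", "validation"]}

# _URL_mix["train"] == ["https://example.com/dataset/en/train.jsonl",
#                       "https://example.com/dataset/es/train.jsonl",
#                       "https://example.com/dataset/ja/train.jsonl",
#                       "https://example.com/dataset/gr/train.jsonl"]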
@@ -136,14 +139,17 @@ class TweetTopicMultilingual(datasets.GeneratorBasedBuilder):
         splits = _URL[self.config.name].keys()
         return [datasets.SplitGenerator(name=i, gen_kwargs={"filepath": downloaded_file[i]}) for i in splits]
 
-    def _generate_examples(self, filepath):
+    def _generate_examples(self, filepath: Union[str, List[str]]):
         _key = 0
-        logger.info("generating examples from = %s", filepath)
-        with open(filepath, encoding="utf-8") as f:
-            _list = [json.loads(i) for i in f.read().split("\n") if len(i) > 0]
-        for i in _list:
-            yield _key, i
-            _key += 1
+        if not isinstance(filepath, list):
+            filepath = [filepath]
+        for path in filepath:
+            logger.info("generating examples from = %s", path)
+            with open(path, encoding="utf-8") as f:
+                _list = [json.loads(line) for line in f.read().split("\n") if len(line) > 0]
+            for record in _list:
+                yield _key, record
+                _key += 1
 
     def _info(self):
         return datasets.DatasetInfo(
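With list-valued filepaths handled, the download manager can hand _generate_examples either a single cached file or a list of them, so the "mix" configuration loads like any other. A usage sketch, assuming the loading script is published under cardiffnlp/tweet_topic_multilingual (the Hub path is an assumption):

from datasets import load_dataset

# Assumed Hub path; point this at wherever the loading script is hosted.
dataset = load_dataset("cardiffnlp/tweet_topic_multilingual", "mix")
print(dataset["train"][0])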