# many_emotions/many_emotions.py
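"""Hugging Face `datasets` loading script for the ma2za/many_emotions dataset.

Two configurations are exposed:
  * "raw"   - one split per language (en, fr, it, es, de), read from a single gzipped JSON-lines file.
  * "split" - pre-made train/validation/test splits, read from gzipped JSONL files.

Each example carries an id, the text, a 7-class emotion label, the name of the
source dataset, its license, and its language.
"""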
import json
from typing import List
import datasets
from datasets import ClassLabel, Value, load_dataset
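# No remote download URLs are needed: the data files live inside the dataset repository
# and are referenced by relative path in _split_generators.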
_URLS = {
}
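# The 7 coarse emotion classes exposed through the ClassLabel feature.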
_SUB_CLASSES = [
"anger",
"fear",
"joy",
"love",
"sadness",
"surprise",
"neutral",
]
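# Fine-grained label names as stored in the raw data; _generate_examples maps the integer
# labels onto these names and remaps "no emotion" to "neutral" and "happiness" to "joy".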
_CLASS_NAMES = [
"no emotion",
"happiness",
"admiration",
"amusement",
"anger",
"annoyance",
"approval",
"caring",
"confusion",
"curiosity",
"desire",
"disappointment",
"disapproval",
"disgust",
"embarrassment",
"excitement",
"fear",
"gratitude",
"grief",
"joy",
"love",
"nervousness",
"optimism",
"pride",
"realization",
"relief",
"remorse",
"sadness",
"surprise",
"neutral",
]
class EmotionsDatasetConfig(datasets.BuilderConfig):
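    """BuilderConfig that also records the feature names and label classes of a configuration."""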
def __init__(self, features, label_classes, **kwargs):
super().__init__(**kwargs)
self.features = features
self.label_classes = label_classes
class EmotionsDataset(datasets.GeneratorBasedBuilder):
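    """GeneratorBasedBuilder exposing the "raw" (per-language) and "split" (train/validation/test) configurations."""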
BUILDER_CONFIGS = [
EmotionsDatasetConfig(
name="raw",
label_classes=_SUB_CLASSES,
features=["text", "label", "dataset", "license", "language"]
),
EmotionsDatasetConfig(
name="split",
label_classes=_SUB_CLASSES,
features=["text", "label", "dataset", "license", "language"]
)
]
    # No "all" configuration is defined above, so default to "raw".
    DEFAULT_CONFIG_NAME = "raw"
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "id": Value("string"),
                    "text": Value("string"),
                    "label": ClassLabel(names=_SUB_CLASSES),
                    "dataset": Value("string"),
                    "license": Value("string"),
                    "language": Value("string"),
                }
            )
        )
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        splits = []
        if self.config.name == "raw":
            # The raw configuration ships a single gzipped JSON-lines file and exposes one split per language.
            downloaded_files = dl_manager.download_and_extract(["data/many_emotions.json.gz"])
            for lang in ["en", "fr", "it", "es", "de"]:
                splits.append(
                    datasets.SplitGenerator(
                        name=lang,
                        gen_kwargs={"filepaths": downloaded_files, "language": lang, "dataset": "raw"},
                    )
                )
        else:
            # The split configuration ships pre-made train/validation/test JSON-lines files.
            for split in ["train", "validation", "test"]:
                downloaded_files = dl_manager.download_and_extract([f"data/split_dataset_{split}.jsonl.gz"])
                splits.append(
                    datasets.SplitGenerator(
                        name=split,
                        gen_kwargs={"filepaths": downloaded_files, "dataset": "split"},
                    )
                )
        return splits
    def _generate_examples(self, filepaths, dataset, license=None, language=None):
        if dataset == "raw":
            for i, filepath in enumerate(filepaths):
                with open(filepath, encoding="utf-8") as f:
                    for idx, line in enumerate(f):
                        example = json.loads(line)
                        if language != "all":
                            # The English text sits under "text"; translations sit under their
                            # language codes ("fr", "it", "es", "de").
                            example = {
                                "id": example["id"],
                                "text": example["text" if language == "en" else language],
                                "label": example["label"],
                                "dataset": example["dataset"],
                                "license": example["license"],
                                "language": language,  # keep the declared "language" feature populated
                            }
                        # Turn the integer label into its name and remap "no emotion" -> "neutral"
                        # and "happiness" -> "joy" so it matches the ClassLabel names.
                        label = _CLASS_NAMES[example["label"]]
                        if label == "no emotion":
                            label = "neutral"
                        elif label == "happiness":
                            label = "joy"
                        example["label"] = label
                        yield example["id"], example
        else:
            # Split files already store fully formed examples.
            for i, filepath in enumerate(filepaths):
                with open(filepath, encoding="utf-8") as f:
                    for idx, line in enumerate(f):
                        example = json.loads(line)
                        yield example["id"], example
if __name__ == "__main__":
    # Quick local smoke test: build the per-language "raw" configuration and show its splits.
    dataset = load_dataset("ma2za/many_emotions", name="raw")
    print(dataset)
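    # The pre-made train/validation/test configuration can be built the same way, e.g.:
    #   splits = load_dataset("ma2za/many_emotions", name="split")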