Tasks: Text Classification
Modalities: Text
Formats: parquet
Languages: Russian
Size: 10K - 100K
Tags: emotion-classification
License: apache-2.0
Commit 6bff699
Parent(s): 7ca07aa

Delete loading script

cedr.py DELETED
@@ -1,188 +0,0 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""CEDR dataset"""

import json
import os

import datasets


_CITATION = """\
@article{sboev2021data,
  title={Data-Driven Model for Emotion Detection in Russian Texts},
  author={Sboev, Alexander and Naumov, Aleksandr and Rybka, Roman},
  journal={Procedia Computer Science},
  volume={190},
  pages={637--642},
  year={2021},
  publisher={Elsevier}
}
"""

_LICENSE = """http://www.apache.org/licenses/LICENSE-2.0"""

_DESCRIPTION = """\
This dataset is designed for the emotion recognition task on Russian-language text. The Corpus for Emotions Detecting in
Russian-language text sentences of different social sources (CEDR) contains 9410 sentences in Russian labeled for 5 emotion
categories. The data were collected from several sources: posts from the LiveJournal social network, texts from the online
news agency Lenta.ru, and Twitter microblog posts. The corpus comes in two variants: main and enriched. The enriched variant
additionally includes tokenization and lemmatization. The dataset ships with predefined train/test splits.
"""

_HOMEPAGE = "https://github.com/sag111/CEDR"

# The Hugging Face datasets library doesn't host the data; these URLs point to the original files.
_URLs = {
    "main": "https://sagteam.ru/cedr/main.zip",
    "enriched": "https://sagteam.ru/cedr/enriched.zip",
}


class Cedr(datasets.GeneratorBasedBuilder):
    """This dataset is designed for the emotion recognition task on Russian-language text."""

    VERSION = datasets.Version("0.1.1")

    # Two configurations are available; select one with, e.g.,
    # datasets.load_dataset("cedr", "main") or datasets.load_dataset("cedr", "enriched").
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="main", version=VERSION, description="The main version of the CEDR dataset"
        ),
        datasets.BuilderConfig(
            name="enriched", version=VERSION, description="The enriched version of the CEDR dataset"
        ),
    ]

    DEFAULT_CONFIG_NAME = "main"  # A default configuration is optional; set one if it makes sense.

    def _info(self):
        # The datasets.DatasetInfo object contains information and typings for the dataset.
        # The feature schema differs between configurations: the enriched variant additionally
        # carries per-sentence token forms ("forma") and lemmas ("lemma").
        if self.config.name == "main":
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "labels": datasets.features.Sequence(
                        datasets.ClassLabel(names=["joy", "sadness", "surprise", "fear", "anger"])
                    ),
                    "source": datasets.Value("string"),
                }
            )
        else:
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "labels": datasets.features.Sequence(
                        datasets.ClassLabel(names=["joy", "sadness", "surprise", "fear", "anger"])
                    ),
                    "source": datasets.Value("string"),
                    "sentences": [
                        [
                            {
                                "forma": datasets.Value("string"),
                                "lemma": datasets.Value("string"),
                            }
                        ]
                    ],
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,  # defined above because they differ between the two configurations
            # If there's a common (input, target) tuple from the features, specify it here.
            # It is used when as_supervised=True in builder.as_dataset.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # dl_manager downloads and extracts the archive for the configuration selected by the
        # user (self.config.name) and returns the path to the cached, extracted folder.
        my_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(my_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={
                    "filepath": os.path.join(data_dir, self.config.name, "train.jsonl"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": os.path.join(data_dir, self.config.name, "test.jsonl"), "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples."""
        # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
        # The `key` is here for legacy reasons (tfds) and is not important in itself.
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                if self.config.name == "main":
                    yield id_, {
                        "text": data["text"],
                        "source": data["source"],
                        "labels": data["labels"],
                    }
                else:
                    yield id_, {
                        "text": data["text"],
                        "source": data["source"],
                        "sentences": data["sentences"],
                        "labels": data["labels"],
                    }
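
For context, a minimal usage sketch follows. It is not part of the commit: it assumes the dataset remains loadable from the Hub under the `cedr` name (now served from the parquet files that replace this script); the config names `main`/`enriched` and the label order come from the script above.

# A minimal usage sketch (hypothetical, not part of the commit).
from datasets import load_dataset

# "main" and "enriched" are the two builder configs defined in the script above.
data = load_dataset("cedr", "main")
print(data)              # DatasetDict with "train" and "test" splits
print(data["train"][0])  # {"text": ..., "labels": [...], "source": ...}

# "labels" is a Sequence of ClassLabel; decode integer ids back to emotion names.
label_feature = data["train"].features["labels"].feature
print(label_feature.int2str(0))  # "joy"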