albertvillanova (HF staff) committed
Commit abafbe6
1 Parent(s): 2e7cb8f

Convert dataset to Parquet (#4)


- Convert dataset to Parquet (8e9e5045dd01f89a3e0a61867eca5d4be0322d5b)
- Add enriched data files (7ca07aae8ef8871598c4ba9ed777ba0dca00df0f)
- Delete loading script (6bff699c9492d248cc5a264ef8c4d2882d432076)

README.md CHANGED
@@ -23,7 +23,7 @@ pretty_name: The Corpus for Emotions Detecting in Russian-language text sentence
 tags:
 - emotion-classification
 dataset_info:
-- config_name: main
+- config_name: enriched
   features:
   - name: text
     dtype: string
@@ -38,16 +38,23 @@ dataset_info:
           '4': anger
   - name: source
     dtype: string
+  - name: sentences
+    list:
+      list:
+      - name: forma
+        dtype: string
+      - name: lemma
+        dtype: string
   splits:
   - name: train
-    num_bytes: 1418355
+    num_bytes: 4792338
     num_examples: 7528
   - name: test
-    num_bytes: 350275
+    num_bytes: 1182315
     num_examples: 1882
-  download_size: 693026
-  dataset_size: 1768630
-- config_name: enriched
+  download_size: 2571516
+  dataset_size: 5974653
+- config_name: main
   features:
   - name: text
     dtype: string
@@ -62,22 +69,29 @@ dataset_info:
           '4': anger
   - name: source
     dtype: string
-  - name: sentences
-    list:
-      list:
-      - name: forma
-        dtype: string
-      - name: lemma
-        dtype: string
   splits:
   - name: train
-    num_bytes: 4792366
+    num_bytes: 1418343
     num_examples: 7528
   - name: test
-    num_bytes: 1182343
+    num_bytes: 350263
     num_examples: 1882
-  download_size: 1822522
-  dataset_size: 5974709
+  download_size: 945328
+  dataset_size: 1768606
+configs:
+- config_name: enriched
+  data_files:
+  - split: train
+    path: enriched/train-*
+  - split: test
+    path: enriched/test-*
+- config_name: main
+  data_files:
+  - split: train
+    path: main/train-*
+  - split: test
+    path: main/test-*
+  default: true
 ---
 
 # Dataset Card for [cedr]
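With the new `configs:` mapping in place, the dataset is served straight from the Parquet shards and no loading script is needed. A minimal sketch of post-conversion usage, assuming the dataset is addressed by the id `cedr` from the dataset card (the exact Hub repo id is an assumption):

from datasets import load_dataset

# "main" is the default config (`default: true` above), so it is also
# what loads when no config name is passed.
main = load_dataset("cedr", "main")
enriched = load_dataset("cedr", "enriched")

# Split sizes per the README metadata: 7528 train / 1882 test examples.
print(main["train"].num_rows, main["test"].num_rows)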
cedr.py DELETED
@@ -1,188 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""CEDR dataset"""
-
-import json
-import os
-
-import datasets
-
-
-_CITATION = """\
-@article{sboev2021data,
-  title={Data-Driven Model for Emotion Detection in Russian Texts},
-  author={Sboev, Alexander and Naumov, Aleksandr and Rybka, Roman},
-  journal={Procedia Computer Science},
-  volume={190},
-  pages={637--642},
-  year={2021},
-  publisher={Elsevier}
-}
-"""
-
-_LICENSE = """http://www.apache.org/licenses/LICENSE-2.0"""
-
-_DESCRIPTION = """\
-This dataset is designed to solve the emotion recognition task for text data in Russian. The Corpus for Emotions
-Detecting in Russian-language text sentences of different social sources (CEDR) contains 9410 sentences in Russian
-labeled for 5 emotion categories. The data was collected from different sources: posts of the LiveJournal social
-network, texts of the online news agency Lenta.ru, and Twitter microblog posts. There are two variants of the corpus:
-main and enriched. The enriched variant includes tokenization and lemmatization. The dataset comes with predefined
-train/test splits.
-"""
-
-_HOMEPAGE = "https://github.com/sag111/CEDR"
-
-# The HuggingFace datasets library doesn't host the datasets but only points to the original files.
-_URLs = {
-    "main": "https://sagteam.ru/cedr/main.zip",
-    "enriched": "https://sagteam.ru/cedr/enriched.zip",
-}
-
-
-class Cedr(datasets.GeneratorBasedBuilder):
-    """This dataset is designed to solve the emotion recognition task for text data in Russian."""
-
-    VERSION = datasets.Version("0.1.1")
-
-    # Either configuration can be loaded with
-    # data = datasets.load_dataset('cedr', 'main')
-    # data = datasets.load_dataset('cedr', 'enriched')
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="main", version=VERSION, description="This part of the CEDR dataset covers the main version"
-        ),
-        datasets.BuilderConfig(
-            name="enriched", version=VERSION, description="This part of the CEDR dataset covers the enriched version"
-        ),
-    ]
-
-    DEFAULT_CONFIG_NAME = "main"  # A default configuration is not mandatory; use one only if it makes sense.
-
-    def _info(self):
-        if self.config.name == "main":  # the configuration selected in BUILDER_CONFIGS above
-            features = datasets.Features(
-                {
-                    "text": datasets.Value("string"),
-                    "labels": datasets.features.Sequence(
-                        datasets.ClassLabel(names=["joy", "sadness", "surprise", "fear", "anger"])
-                    ),
-                    "source": datasets.Value("string"),
-                }
-            )
-        else:  # the "enriched" configuration additionally carries per-token forms and lemmas
-            features = datasets.Features(
-                {
-                    "text": datasets.Value("string"),
-                    "labels": datasets.features.Sequence(
-                        datasets.ClassLabel(names=["joy", "sadness", "surprise", "fear", "anger"])
-                    ),
-                    "source": datasets.Value("string"),
-                    "sentences": [
-                        [
-                            {
-                                "forma": datasets.Value("string"),
-                                "lemma": datasets.Value("string"),
-                            }
-                        ]
-                    ],
-                }
-            )
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # Features differ between the two configurations, so they are defined above.
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # dl_manager downloads and extracts the URLs, returning paths to the cached local files.
-        my_urls = _URLs[self.config.name]
-        data_dir = dl_manager.download_and_extract(my_urls)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, self.config.name, "train.jsonl"),
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, self.config.name, "test.jsonl"), "split": "test"},
-            ),
-        ]
-
-    def _generate_examples(self, filepath, split):
-        """Yields examples as (key, example) tuples; the key exists for legacy (tfds) reasons."""
-        with open(filepath, encoding="utf-8") as f:
-            for id_, row in enumerate(f):
-                data = json.loads(row)
-                if self.config.name == "main":
-                    yield id_, {
-                        "text": data["text"],
-                        "source": data["source"],
-                        "labels": data["labels"],
-                    }
-                else:
-                    yield id_, {
-                        "text": data["text"],
-                        "source": data["source"],
-                        "sentences": data["sentences"],
-                        "labels": data["labels"],
-                    }
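The deleted script downloaded the original zip archives and parsed train.jsonl/test.jsonl itself; after this commit none of that builder code runs, since `load_dataset` resolves the Parquet shards through the `configs:` mapping in README.md. A rough sketch of the equivalent access, yielding the same schema the script produced (repo id `cedr` again an assumption):

from datasets import load_dataset

ds = load_dataset("cedr", "enriched", split="test")
example = ds[0]

# Fields shared by both configs.
print(example["text"], example["labels"], example["source"])

# Enriched-only field: a list of sentences, each a list of tokens
# carrying the surface form and lemma.
print(example["sentences"][0][0])  # e.g. {"forma": "...", "lemma": "..."}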
enriched/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:323b7f63f37ecc497a08b411d78f5750d7fe9548b24104596f3f28103a1f8073
+size 512569
enriched/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8025424aeb3b2c00b90cb7ec81f5820aa11f102edeb030d436f2b391314f8094
+size 2058947
main/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45c72e49d06ffb1db34aa3082f24c28148c1bfba6aa16ee150b7d43c52de2dc3
+size 187983
main/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d26c21daed57be1657a80a78fe04c4865ad249f49070b6c5e00b2f90f811e1a
+size 757345
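The four entries above are Git LFS pointer files; the Parquet shards themselves live in LFS storage. A small sketch of inspecting one shard directly, assuming a local clone with the LFS objects fetched (`git lfs pull`):

import pandas as pd

# Read the single train shard of the "main" config.
df = pd.read_parquet("main/train-00000-of-00001.parquet")

print(len(df))              # expected: 7528 rows, matching the README splits
print(df.columns.tolist())  # expected: ['text', 'labels', 'source']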