albertvillanova (HF staff) committed
Commit 9d09c49
1 Parent(s): c2ea15a

Convert dataset to Parquet (#2)


- Convert dataset to Parquet (f54216c1d5c58fe141355c6a553335d583756233)
- Delete loading script (f67aca9457bfc6bcda79454f082675d971702b96)
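
With the data shipped as Parquet and the loading script gone, the dataset loads without executing repository code. A minimal sketch, assuming the Hub repo id is `hate_speech_pl` (taken from the deleted script's filename; the exact id is not stated in this commit):

```python
from datasets import load_dataset

# Loads the Parquet-backed train split directly; no script execution is
# needed now that the builder is deleted. Repo id assumed, see lead-in.
ds = load_dataset("hate_speech_pl", split="train")
print(ds.num_rows)  # 13887, per the README metadata in this commit
```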

README.md CHANGED
@@ -22,7 +22,6 @@ task_ids:
 - sentiment-classification
 - sentiment-scoring
 - topic-classification
-paperswithcode_id: null
 pretty_name: HateSpeechPl
 dataset_info:
   features:
@@ -50,10 +49,15 @@ dataset_info:
     dtype: uint8
   splits:
   - name: train
-    num_bytes: 3436190
+    num_bytes: 3436182
     num_examples: 13887
-  download_size: 3877954
-  dataset_size: 3436190
+  download_size: 2184056
+  dataset_size: 3436182
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
 ---
 
 
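
The `configs` block added above is what tells the Hub and the `datasets` library which files back each split, replacing the deleted script's download logic. Loading through the generic `parquet` builder against the same glob is roughly equivalent; a sketch, assuming it is run from a local clone of the dataset repo:

```python
from datasets import load_dataset

# Point the generic parquet builder at the same glob the configs entry
# declares (data/train-*), yielding the same single train split.
ds = load_dataset("parquet", data_files={"train": "data/train-*"}, split="train")
```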
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b58a2bff2a2ea25c1eb89e20495d13baefdc45ce431b4af70caaa4f368e7c91b
+size 2184056
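
Git itself stores only this three-line LFS pointer; the 2,184,056-byte Parquet payload lives in LFS storage. For direct access without the `datasets` library, pandas can read the shard through huggingface_hub's fsspec backend; a sketch, with the repo id again assumed:

```python
import pandas as pd  # requires huggingface_hub for the hf:// filesystem

# Read the single train shard straight from the Hub into a DataFrame.
df = pd.read_parquet("hf://datasets/hate_speech_pl/data/train-00000-of-00001.parquet")
```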
hate_speech_pl.py DELETED
@@ -1,111 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""HateSpeech Corpus for Polish"""
-
-
-import csv
-
-import datasets
-
-
-# Find for instance the citation on arxiv or on the dataset repo/website
-_CITATION = r"""\
-@article{troszynski2017czy,
-    title={Czy komputer rozpozna hejtera? Wykorzystanie uczenia maszynowego (ML) w jako{\'s}ciowej analizie danych},
-    author={Troszy{\'n}ski, Marek and Wawer, Aleksandra},
-    journal={Przegl{\k{a}}d Socjologii Jako{\'s}ciowej},
-    volume={13},
-    number={2},
-    pages={62--80},
-    year={2017},
-    publisher={Uniwersytet {\L}{\'o}dzki, Wydzia{\l} Ekonomiczno-Socjologiczny, Katedra Socjologii~…}
-}
-"""
-
-_DESCRIPTION = """\
-HateSpeech corpus in the current version contains over 2000 posts crawled from public Polish web. They represent various types and degrees of offensive language, expressed toward minorities (eg. ethnical, racial). The data were annotated manually.
-"""
-
-_HOMEPAGE = "http://zil.ipipan.waw.pl/HateSpeech"
-
-_LICENSE = "CC BY-NC-SA"
-
-_URLs = [
-    "https://raw.githubusercontent.com/aiembassy/hatespeech-corpus-pl/master/data/fragment_anotatora_2011_ZK.csv",
-    "https://raw.githubusercontent.com/aiembassy/hatespeech-corpus-pl/master/data/fragment_anotatora_2011b.csv",
-    "https://raw.githubusercontent.com/aiembassy/hatespeech-corpus-pl/master/data/fragment_anotatora_2012_luty.csv",
-]
-
-
-class HateSpeechPl(datasets.GeneratorBasedBuilder):
-    """HateSpeech Corpus for Polish"""
-
-    VERSION = datasets.Version("1.1.0")
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("uint16"),
-                    "text_id": datasets.Value("uint32"),
-                    "annotator_id": datasets.Value("uint8"),
-                    "minority_id": datasets.Value("uint8"),
-                    "negative_emotions": datasets.Value("bool"),
-                    "call_to_action": datasets.Value("bool"),
-                    "source_of_knowledge": datasets.Value("uint8"),
-                    "irony_sarcasm": datasets.Value("bool"),
-                    "topic": datasets.Value("uint8"),
-                    "text": datasets.Value("string"),
-                    "rating": datasets.Value("uint8"),
-                }
-            ),
-            supervised_keys=None,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        my_urls = _URLs
-        filepaths = dl_manager.download(my_urls)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepaths": filepaths,
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepaths):
-        """Yields examples."""
-        for file_id_, filepath in enumerate(filepaths):
-            with open(filepath, encoding="utf-8") as f:
-                csv_reader = csv.DictReader(f, delimiter=",", escapechar="\\")
-                for id_, data in enumerate(csv_reader):
-                    yield f"{file_id_}/{id_}", {
-                        "id": data["id_fragmentu"],
-                        "text_id": data["id_tekstu"],
-                        "annotator_id": data["id_anotatora"],
-                        "minority_id": data["id_mniejszosci"],
-                        "negative_emotions": data["negatywne_emocje"],
-                        "call_to_action": data["wezw_ddzial"],
-                        "source_of_knowledge": data["typ_ramki"],
-                        "irony_sarcasm": data["ironia_sarkazm"],
-                        "topic": data["temat"],
-                        "text": data["tekst"],
-                        "rating": data["ocena"],
-                    }
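
For anyone who still needs the raw annotation files, the deleted builder's CSV handling is straightforward to reproduce outside `datasets`. A sketch with pandas, reusing the three source URLs and the same delimiter and escape character the script passed to `csv.DictReader`:

```python
import pandas as pd

_URLS = [
    "https://raw.githubusercontent.com/aiembassy/hatespeech-corpus-pl/master/data/fragment_anotatora_2011_ZK.csv",
    "https://raw.githubusercontent.com/aiembassy/hatespeech-corpus-pl/master/data/fragment_anotatora_2011b.csv",
    "https://raw.githubusercontent.com/aiembassy/hatespeech-corpus-pl/master/data/fragment_anotatora_2012_luty.csv",
]

# Same parsing parameters as the deleted script's csv.DictReader call.
df = pd.concat(
    (pd.read_csv(url, delimiter=",", escapechar="\\") for url in _URLS),
    ignore_index=True,
)
# Columns keep their original Polish names, e.g. "tekst" (text), "ocena" (rating).
```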