FpOliveira committed on
Commit
08fd583
1 Parent(s): f7cad52

Update bookcorpus.py

Files changed (1)
  1. bookcorpus.py +42 -26
bookcorpus.py CHANGED
@@ -1,6 +1,5 @@
-
 # coding=utf-8
-# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+# Copyright 2023 Your Name or Your Organization
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,16 +14,21 @@
 # limitations under the License.
 
 # Lint as: python3
-"""Toxic/Abusive Tweets Multilabel Classification Dataset for TuPi."""
+"""TuPi: Hate Speech Detection Dataset in Portuguese."""
 
 import os
 import pandas as pd
 import datasets
 
-# TODO: Add BibTeX citation
-# Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
-# Replace with the citation for TuPi dataset
+@article{YourReferenceHere,
+author = {Your Name or Your Organization},
+title = {TuPi: Largest Hate Speech Dataset in Portuguese},
+year = {2023},
+url = {URL to the official TuPi dataset publication or documentation},
+eprinttype = {arXiv},
+timestamp = {Current Timestamp},
+}
 """
 
 _DESCRIPTION = """\
@@ -45,23 +49,16 @@ The data includes content from Twitter and Instagram, collected between 2017 and
 - Other
 """
 
-# TODO: Add a link to an official homepage for the dataset here
-_HOMEPAGE = "# Replace with the TuPi dataset homepage"
-
-# TODO: Add the license for the dataset here if you can find it
-_LICENSE = "# Replace with the TuPi dataset license"
-
-# TODO: Add link to the official dataset URLs here
-# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+_HOMEPAGE = "# Add the TuPi dataset homepage URL"
+_LICENSE = "# Add the TuPi dataset license URL"
 
 _URLS = {
 "multilabel": "https://raw.githubusercontent.com/Silly-Machine/TuPi-Portuguese-Hate-Speech-Dataset/main/datasets/tupi_hierarchy.csv",
 "binary": "https://raw.githubusercontent.com/Silly-Machine/TuPi-Portuguese-Hate-Speech-Dataset/main/datasets/tupi_binary.csv",
-}
+}
 
 class TuPi(datasets.GeneratorBasedBuilder):
-"""Toxic/Abusive Tweets Classification Dataset for TuPi."""
+"""TuPi Hate Speech Detection Dataset in Portuguese."""
 
 VERSION = datasets.Version("1.0.0")
 
@@ -69,12 +66,12 @@ class TuPi(datasets.GeneratorBasedBuilder):
 datasets.BuilderConfig(
 name="multilabel",
 version=VERSION,
-description="Replace with the multilabel dataset description.",
+description="Full multilabel dataset with annotations for each category.",
 ),
 datasets.BuilderConfig(
 name="binary",
 version=VERSION,
-description="Replace with the binary dataset description.",
+description="Binary classification dataset with combined hate speech labels.",
 ),
 ]
 
@@ -85,14 +82,25 @@ class TuPi(datasets.GeneratorBasedBuilder):
 features = datasets.Features(
 {
 "text": datasets.Value("string"),
-"hate": datasets.ClassLabel(names=["not-hate", "hate"]),
+"label": datasets.ClassLabel(names=["non-hate", "hate"]),
 }
 )
 else:
 features = datasets.Features(
 {
 "text": datasets.Value("string"),
-# Replace with the appropriate class labels for TuPi multilabel dataset
+"aggressive": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
+"ageism": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
+"aporophobia": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
+"body_shaming": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
+"capacitism": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
+"lgbtphobia": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
+"politics": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
+"racism": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
+"religious_intolerance": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
+"misogyny": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
+"xenophobia": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
+"other": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
 }
 )
 
@@ -132,12 +140,20 @@ class TuPi(datasets.GeneratorBasedBuilder):
 df = pd.read_csv(filepath, engine="python")
 for key, row in enumerate(df.itertuples()):
 if self.config.name == "multilabel":
-# Replace with the appropriate field names for TuPi multilabel dataset
 yield key, {
 "text": row.text,
-"label_1": int(row.label_1),
-"label_2": int(row.label_2),
-# Add more labels as needed
+"aggressive": int(float(row.aggressive)),
+"ageism": int(float(row.ageism)),
+"aporophobia": int(float(row.aporophobia)),
+"body_shaming": int(float(row.body_shaming)),
+"capacitism": int(float(row.capacitism)),
+"lgbtphobia": int(float(row.lgbtphobia)),
+"politics": int(float(row.politics)),
+"racism": int(float(row.racism)),
+"religious_intolerance": int(float(row.religious_intolerance)),
+"misogyny": int(float(row.misogyny)),
+"xenophobia": int(float(row.xenophobia)),
+"other": int(float(row.other)),
 }
 else:
-yield key, {"text": row.text, "label": int(row.toxic)}
+yield key, {"text": row.text, "label": int(row.hate)}
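Note on the multilabel branch above: every category column read from the CSV is cast with int(float(...)) and interpreted as an index into a four-way ClassLabel (zero_votes, one_vote, two_votes, three_votes), so a raw cell such as "2.0" becomes class index 2. Going through float first presumably keeps the cast robust when the counts are stored as floats. A minimal sketch of that mapping in isolation; the sample cell value below is invented for illustration:

import datasets

# Vote-count label space shared by all twelve category columns in the multilabel config.
vote_label = datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"])

raw_cell = "2.0"                  # hypothetical CSV cell: two annotators flagged the tweet
index = int(float(raw_cell))      # same cast as in _generate_examples -> 2
print(vote_label.int2str(index))  # prints "two_votes"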
 
 
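For reference, a hedged sketch of how the two configurations defined by this script could be loaded with the datasets library. The local script path, the "train" split name, and the trust_remote_code flag are assumptions that are not visible in this diff:

from datasets import load_dataset

# Binary config: a single "label" ClassLabel with names ["non-hate", "hate"].
binary = load_dataset("bookcorpus.py", name="binary", split="train", trust_remote_code=True)
print(binary.features["label"].names)  # expected: ['non-hate', 'hate']
print(binary[0]["text"], binary[0]["label"])

# Multilabel config: one vote-count ClassLabel per hate-speech category.
multi = load_dataset("bookcorpus.py", name="multilabel", split="train", trust_remote_code=True)
print(multi.features["racism"].names)  # expected: ['zero_votes', 'one_vote', 'two_votes', 'three_votes']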