albertvillanova (HF staff) committed on
Commit 9dd23e1
1 Parent(s): c2cf662

Delete loading script

Files changed (1)
  1. civil_comments.py +0 -148
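
With the loading script gone, `load_dataset` resolves this dataset from the data files hosted on the Hub (typically the auto-converted Parquet export) instead of executing repository code. A minimal sketch of the replacement loading path, assuming the `civil_comments` repo id stays valid and keeps the schema declared in the deleted script below:

```python
from datasets import load_dataset

# No loading script is executed; the data comes straight from the Hub's files.
ds = load_dataset("civil_comments", split="train")

# Same columns the deleted builder declared: "text" plus seven float labels.
print(ds[0]["text"], ds[0]["toxicity"])
```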
civil_comments.py DELETED
@@ -1,148 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """CivilComments from Jigsaw Unintended Bias Kaggle Competition."""
-
-
- import csv
- import os
-
- import datasets
-
-
- _CITATION = """
- @article{DBLP:journals/corr/abs-1903-04561,
-   author    = {Daniel Borkan and
-                Lucas Dixon and
-                Jeffrey Sorensen and
-                Nithum Thain and
-                Lucy Vasserman},
-   title     = {Nuanced Metrics for Measuring Unintended Bias with Real Data for Text
-                Classification},
-   journal   = {CoRR},
-   volume    = {abs/1903.04561},
-   year      = {2019},
-   url       = {http://arxiv.org/abs/1903.04561},
-   archivePrefix = {arXiv},
-   eprint    = {1903.04561},
-   timestamp = {Sun, 31 Mar 2019 19:01:24 +0200},
-   biburl    = {https://dblp.org/rec/bib/journals/corr/abs-1903-04561},
-   bibsource = {dblp computer science bibliography, https://dblp.org}
- }
- """
-
- _DESCRIPTION = """
- The comments in this dataset come from an archive of the Civil Comments
- platform, a commenting plugin for independent news sites. These public comments
- were created from 2015 to 2017 and appeared on approximately 50 English-language
- news sites across the world. When Civil Comments shut down in 2017, they chose
- to make the public comments available in a lasting open archive to enable future
- research. The original data, published on figshare, includes the public comment
- text, some associated metadata such as article IDs, timestamps and
- commenter-generated "civility" labels, but does not include user ids. Jigsaw
- extended this dataset by adding additional labels for toxicity and identity
- mentions. This dataset is an exact replica of the data released for the
- Jigsaw Unintended Bias in Toxicity Classification Kaggle challenge. This
- dataset is released under CC0, as is the underlying comment text.
- """
-
- _DOWNLOAD_URL = "https://storage.googleapis.com/jigsaw-unintended-bias-in-toxicity-classification/civil_comments.zip"
-
-
- class CivilComments(datasets.GeneratorBasedBuilder):
-     """Classification and tagging of 2M comments on news sites.
-
-     This version of the CivilComments dataset provides access to the primary
-     seven labels that were annotated by crowd workers; the toxicity and other
-     tags are values between 0 and 1 indicating the fraction of annotators that
-     assigned these attributes to the comment text.
-
-     The other tags, which are only available for a fraction of the input
-     examples, are currently ignored, as are all of the attributes that were
-     part of the original Civil Comments release. See the Kaggle documentation
-     for more details about the available features.
-     """
-
-     VERSION = datasets.Version("0.9.0")
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             # datasets.features.FeatureConnectors
-             features=datasets.Features(
-                 {
-                     "text": datasets.Value("string"),
-                     "toxicity": datasets.Value("float32"),
-                     "severe_toxicity": datasets.Value("float32"),
-                     "obscene": datasets.Value("float32"),
-                     "threat": datasets.Value("float32"),
-                     "insult": datasets.Value("float32"),
-                     "identity_attack": datasets.Value("float32"),
-                     "sexual_explicit": datasets.Value("float32"),
-                 }
-             ),
-             # The supervised_keys version is very impoverished.
-             supervised_keys=("text", "toxicity"),
-             homepage="https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification/data",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         dl_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"filename": os.path.join(dl_path, "train.csv"), "toxicity_label": "target"},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "filename": os.path.join(dl_path, "test_public_expanded.csv"),
-                     "toxicity_label": "toxicity",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "filename": os.path.join(dl_path, "test_private_expanded.csv"),
-                     "toxicity_label": "toxicity",
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filename, toxicity_label):
-         """Yields examples.
-
-         Each example contains a text input and then seven annotation labels.
-
-         Args:
-           filename: the path of the file to be read for this split.
-           toxicity_label: indicates 'target' or 'toxicity' to capture the
-             variation in the released labels for this dataset.
-
-         Yields:
-           A dictionary of features, all floating point except the input text.
-         """
-         with open(filename, encoding="utf-8") as f:
-             reader = csv.DictReader(f)
-             for row in reader:
-                 example = {}
-                 example["text"] = row["comment_text"]
-                 example["toxicity"] = float(row[toxicity_label])
-                 for label in ["severe_toxicity", "obscene", "threat", "insult", "identity_attack", "sexual_explicit"]:
-                     example[label] = float(row[label])
-                 yield row["id"], example
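
For reference, the per-row conversion that the deleted `_generate_examples` performed is straightforward to reproduce as standalone code; a minimal sketch grounded in the script above (the CSV names come from the extracted `civil_comments.zip`; the train split stores toxicity in the `target` column, both test splits in `toxicity`):

```python
import csv

OTHER_LABELS = ["severe_toxicity", "obscene", "threat", "insult", "identity_attack", "sexual_explicit"]


def iter_examples(filename, toxicity_label):
    """Yield (id, example) pairs the way the deleted builder did."""
    with open(filename, encoding="utf-8") as f:
        for row in csv.DictReader(f):
            # One text column plus seven float-valued annotation labels.
            example = {"text": row["comment_text"], "toxicity": float(row[toxicity_label])}
            for label in OTHER_LABELS:
                example[label] = float(row[label])
            yield row["id"], example


# Usage, mirroring the TRAIN split generator:
# for key, example in iter_examples("train.csv", "target"):
#     ...
```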