system HF staff committed on
Commit
5539069
0 Parent(s):

Update files from the datasets library (from 1.0.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
civil_comments.py ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """CivilComments from Jigsaw Unintended Bias Kaggle Competition."""
18
+
19
+ from __future__ import absolute_import, division, print_function
20
+
21
+ import csv
22
+ import os
23
+
24
+ import datasets
25
+
26
+
27
# BibTeX entry for Borkan et al. (2019), the paper describing the extended
# (toxicity/identity-annotated) Civil Comments data; surfaced via DatasetInfo.citation.
_CITATION = """
@article{DBLP:journals/corr/abs-1903-04561,
  author    = {Daniel Borkan and
               Lucas Dixon and
               Jeffrey Sorensen and
               Nithum Thain and
               Lucy Vasserman},
  title     = {Nuanced Metrics for Measuring Unintended Bias with Real Data for Text
               Classification},
  journal   = {CoRR},
  volume    = {abs/1903.04561},
  year      = {2019},
  url       = {http://arxiv.org/abs/1903.04561},
  archivePrefix = {arXiv},
  eprint    = {1903.04561},
  timestamp = {Sun, 31 Mar 2019 19:01:24 +0200},
  biburl    = {https://dblp.org/rec/bib/journals/corr/abs-1903-04561},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""

# Human-readable dataset summary; surfaced via DatasetInfo.description.
_DESCRIPTION = """
The comments in this dataset come from an archive of the Civil Comments
platform, a commenting plugin for independent news sites. These public comments
were created from 2015 - 2017 and appeared on approximately 50 English-language
news sites across the world. When Civil Comments shut down in 2017, they chose
to make the public comments available in a lasting open archive to enable future
research. The original data, published on figshare, includes the public comment
text, some associated metadata such as article IDs, timestamps and
commenter-generated "civility" labels, but does not include user ids. Jigsaw
extended this dataset by adding additional labels for toxicity and identity
mentions. This data set is an exact replica of the data released for the
Jigsaw Unintended Bias in Toxicity Classification Kaggle challenge. This
dataset is released under CC0, as is the underlying comment text.
"""

# Single zip archive containing train.csv, test_public_expanded.csv and
# test_private_expanded.csv (the three splits consumed below).
_DOWNLOAD_URL = "https://storage.googleapis.com/jigsaw-unintended-bias-in-toxicity-classification/civil_comments.zip"
64
+
65
+
66
class CivilComments(datasets.GeneratorBasedBuilder):
    """Classification and tagging of 2M comments on news sites.

    This version of the CivilComments Dataset provides access to the primary
    seven labels that were annotated by crowd workers, the toxicity and other
    tags are a value between 0 and 1 indicating the fraction of annotators that
    assigned these attributes to the comment text.

    The other tags, which are only available for a fraction of the input examples
    are currently ignored, as are all of the attributes that were part of the
    original civil comments release. See the Kaggle documentation for more
    details about the available features.
    """

    VERSION = datasets.Version("0.9.0")

    def _info(self):
        """Return the DatasetInfo: one string feature plus seven float32 annotation scores."""
        score_names = (
            "toxicity",
            "severe_toxicity",
            "obscene",
            "threat",
            "insult",
            "identity_attack",
            "sexual_explicit",
        )
        # Text first, then the crowd-worker score columns, mirroring the CSV layout.
        feature_spec = {"text": datasets.Value("string")}
        feature_spec.update({name: datasets.Value("float32") for name in score_names})
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(feature_spec),
            # The supervised_keys version is very impoverished.
            supervised_keys=("text", "toxicity"),
            homepage="https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification/data",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and describe the three CSV-backed splits.

        The train CSV names its toxicity column "target" while both expanded
        test CSVs name it "toxicity"; the per-split label name is forwarded to
        _generate_examples to absorb that difference.
        """
        dl_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
        split_specs = (
            (datasets.Split.TRAIN, "train.csv", "target"),
            (datasets.Split.VALIDATION, "test_public_expanded.csv", "toxicity"),
            (datasets.Split.TEST, "test_private_expanded.csv", "toxicity"),
        )
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filename": os.path.join(dl_path, csv_name),
                    "toxicity_label": label_column,
                },
            )
            for split_name, csv_name, label_column in split_specs
        ]

    def _generate_examples(self, filename, toxicity_label):
        """Yields examples.

        Each example contains a text input and then seven annotation labels.

        Args:
          filename: the path of the file to be read for this split.
          toxicity_label: indicates 'target' or 'toxicity' to capture the variation
            in the released labels for this dataset.

        Yields:
          A dictionary of features, all floating point except the input text,
          keyed by the row's "id" column.
        """
        secondary_labels = (
            "severe_toxicity",
            "obscene",
            "threat",
            "insult",
            "identity_attack",
            "sexual_explicit",
        )
        with open(filename, encoding="utf-8") as csv_file:
            for row in csv.DictReader(csv_file):
                example = {
                    "text": row["comment_text"],
                    # Column name differs per split ("target" vs "toxicity").
                    "toxicity": float(row[toxicity_label]),
                }
                for name in secondary_labels:
                    example[name] = float(row[name])
                yield row["id"], example
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"default": {"description": "\nThe comments in this dataset come from an archive of the Civil Comments\nplatform, a commenting plugin for independent news sites. These public comments\nwere created from 2015 - 2017 and appeared on approximately 50 English-language\nnews sites across the world. When Civil Comments shut down in 2017, they chose\nto make the public comments available in a lasting open archive to enable future\nresearch. The original data, published on figshare, includes the public comment\ntext, some associated metadata such as article IDs, timestamps and\ncommenter-generated \"civility\" labels, but does not include user ids. Jigsaw\nextended this dataset by adding additional labels for toxicity and identity\nmentions. This data set is an exact replica of the data released for the\nJigsaw Unintended Bias in Toxicity Classification Kaggle challenge. This\ndataset is released under CC0, as is the underlying comment text.\n", "citation": "\n@article{DBLP:journals/corr/abs-1903-04561,\n author = {Daniel Borkan and\n Lucas Dixon and\n Jeffrey Sorensen and\n Nithum Thain and\n Lucy Vasserman},\n title = {Nuanced Metrics for Measuring Unintended Bias with Real Data for Text\n Classification},\n journal = {CoRR},\n volume = {abs/1903.04561},\n year = {2019},\n url = {http://arxiv.org/abs/1903.04561},\n archivePrefix = {arXiv},\n eprint = {1903.04561},\n timestamp = {Sun, 31 Mar 2019 19:01:24 +0200},\n biburl = {https://dblp.org/rec/bib/journals/corr/abs-1903-04561},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n", "homepage": "https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification/data", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "toxicity": {"dtype": "float32", "id": null, "_type": "Value"}, "severe_toxicity": {"dtype": "float32", "id": null, "_type": "Value"}, "obscene": {"dtype": "float32", "id": null, "_type": "Value"}, "threat": {"dtype": "float32", "id": null, 
"_type": "Value"}, "insult": {"dtype": "float32", "id": null, "_type": "Value"}, "identity_attack": {"dtype": "float32", "id": null, "_type": "Value"}, "sexual_explicit": {"dtype": "float32", "id": null, "_type": "Value"}}, "supervised_keys": {"input": "text", "output": "toxicity"}, "builder_name": "civil_comments", "config_name": "default", "version": {"version_str": "0.9.0", "description": null, "datasets_version_to_prepare": null, "major": 0, "minor": 9, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 32073013, "num_examples": 97320, "dataset_name": "civil_comments"}, "train": {"name": "train", "num_bytes": 596835730, "num_examples": 1804874, "dataset_name": "civil_comments"}, "validation": {"name": "validation", "num_bytes": 32326369, "num_examples": 97320, "dataset_name": "civil_comments"}}, "download_checksums": {"https://storage.googleapis.com/jigsaw-unintended-bias-in-toxicity-classification/civil_comments.zip": {"num_bytes": 414947977, "checksum": "767b71a3d9dc7a2eceb234d0c3e7e38604e11f59c12ba1cbb888ffd4ce6b6271"}}, "download_size": 414947977, "dataset_size": 661235112, "size_in_bytes": 1076183089}}
dummy/0.9.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f645cdf6b361fb222d20cffcdcad61a1ad5a012122cd1f0470415f6bedfff3c0
3
+ size 1928