nreimers committed
Commit
4334605
1 Parent(s): 8c99aac
Files changed (5)
  1. .gitattributes +4 -0
  2. README.md +6 -0
  3. prepare.py +45 -0
  4. test.jsonl +3 -0
  5. train.jsonl +3 -0
.gitattributes CHANGED
@@ -25,3 +25,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ train.jsonl filter=lfs diff=lfs merge=lfs -text
+ test.jsonl filter=lfs diff=lfs merge=lfs -text
+ train.csv filter=lfs diff=lfs merge=lfs -text
+ original_train.csv filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,6 @@
+ # Toxic Conversation
+ This is a version of the [Jigsaw Unintended Bias in Toxicity Classification dataset](https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification/overview). It contains comments from the Civil Comments platform together with annotations indicating whether each comment is toxic or not.
+
+ Each example was annotated by 10 annotators and, as recommended on the task page, a comment is labeled as toxic when target >= 0.5.
+
+ The dataset is imbalanced, with only about 8% of the comments marked as toxic.
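The two JSONL splits referenced above can be consumed directly with the Hugging Face datasets library. A minimal sketch, assuming train.jsonl and test.jsonl sit in the working directory; the field names match what prepare.py below writes:

from datasets import load_dataset

# Load the JSONL splits as a DatasetDict (sketch; file paths are assumptions).
ds = load_dataset("json", data_files={"train": "train.jsonl", "test": "test.jsonl"})

print(ds["train"][0])  # {'text': ..., 'label': '0' or '1', 'label_text': 'not toxic' or 'toxic'}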
prepare.py ADDED
@@ -0,0 +1,45 @@
+ import pandas as pd
+ from collections import Counter
+ import json
+ import random
+
+
+ df = pd.read_csv("original_train.csv")
+
+ print(df)
+ """
+ for field in ["target", "severe_toxicity", "obscene", "identity_attack", "insult", "threat"]:
+     print("\n\n", field)
+     num_greater = 0
+     for val in df[field]:
+         if val >= 0.5:
+             num_greater += 1
+
+     print(num_greater, len(df[field]), f"{num_greater/len(df[field])*100:.2f}%")
+ """
+
+
+ rows = [{'text': row['comment_text'].strip(),
+          'label': "1" if row['target'] >= 0.5 else "0",
+          'label_text': "toxic" if row['target'] >= 0.5 else "not toxic",
+          } for idx, row in df.iterrows()]
+
+ random.seed(42)
+ random.shuffle(rows)
+
+ num_test = 10000
+ splits = {'test': rows[0:num_test], 'train': rows[num_test:]}
+
+ print("Train:", len(splits['train']))
+ print("Test:", len(splits['test']))
+
+ num_labels = Counter()
+
+ for row in splits['test']:
+     num_labels[row['label']] += 1
+ print(num_labels)
+
+ for split in ['train', 'test']:
+     with open(f'{split}.jsonl', 'w') as fOut:
+         for row in splits[split]:
+             fOut.write(json.dumps(row)+"\n")
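Not part of this commit, but as a quick sanity check on the output of prepare.py, the label distribution of both splits can be recounted straight from the JSONL files it writes (file names taken from the script above):

import json
from collections import Counter

# Recount labels in the JSONL files written by prepare.py
# (sanity-check sketch, not part of the commit).
for split in ["train", "test"]:
    counts = Counter()
    with open(f"{split}.jsonl") as fIn:
        for line in fIn:
            counts[json.loads(line)["label"]] += 1
    total = sum(counts.values())
    print(split, dict(counts), f"toxic: {counts['1'] / total * 100:.2f}%")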
test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:54318e453857e6299039d1e2e3722fe7efd759d901dcc43a8fe544e2b6ed44e5
+ size 3520769
train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:501b732fbc4bc39aba99ec07b28bc8f44d9e99a331dda9d73a68f13d2c72d4bf
+ size 634714202