parquet-converter committed on
Commit
90ca6d5
·
1 Parent(s): d0dafd0

Update parquet files

Browse files
.gitattributes DELETED
@@ -1,27 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bin.* filter=lfs diff=lfs merge=lfs -text
5
- *.bz2 filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.model filter=lfs diff=lfs merge=lfs -text
12
- *.msgpack filter=lfs diff=lfs merge=lfs -text
13
- *.onnx filter=lfs diff=lfs merge=lfs -text
14
- *.ot filter=lfs diff=lfs merge=lfs -text
15
- *.parquet filter=lfs diff=lfs merge=lfs -text
16
- *.pb filter=lfs diff=lfs merge=lfs -text
17
- *.pt filter=lfs diff=lfs merge=lfs -text
18
- *.pth filter=lfs diff=lfs merge=lfs -text
19
- *.rar filter=lfs diff=lfs merge=lfs -text
20
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
- *.tar.* filter=lfs diff=lfs merge=lfs -text
22
- *.tflite filter=lfs diff=lfs merge=lfs -text
23
- *.tgz filter=lfs diff=lfs merge=lfs -text
24
- *.xz filter=lfs diff=lfs merge=lfs -text
25
- *.zip filter=lfs diff=lfs merge=lfs -text
26
- *.zstandard filter=lfs diff=lfs merge=lfs -text
27
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md DELETED
@@ -1,2 +0,0 @@
1
- # hate_speech_offensive
2
- This dataset is a version from [hate_speech_offensive](https://huggingface.co/datasets/hate_speech_offensive), split into train and test set.
 
 
 
SetFit--hate_speech_offensive/json-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5303a68ee2d3f912ce814fdee4007dca823afc87ad23e0df13dd4b5f6ca3def4
3
+ size 139061
SetFit--hate_speech_offensive/json-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3a960209e81773f764e8570d79b6b5c2572e8b0d817916090209e86a50abd820
3
+ size 1552354
prepare_data.py DELETED
@@ -1,17 +0,0 @@
1
- from datasets import load_dataset
2
- import json
3
- import random
4
-
5
- dataset = load_dataset("hate_speech_offensive")
6
- id2label = dataset['train'].features['class'].names
7
-
8
- rows = [{'text': row['tweet'], 'label': row['class'], 'label_text': id2label[row['class']]} for row in dataset['train']]
9
-
10
- random.shuffle(rows)
11
- num_test = 2000
12
- data_splits = {'test': rows[0:num_test], 'train': rows[num_test:]}
13
-
14
- for split in data_splits.keys():
15
- with open(f'{split}.jsonl', 'w') as fOut:
16
- for row in data_splits[split]:
17
- fOut.write(json.dumps(row)+"\n")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
test.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
train.jsonl DELETED
The diff for this file is too large to render. See raw diff