albertvillanova HF staff committed on
Commit
da252d5
1 Parent(s): 498dbe6

Convert dataset to Parquet (#5)

Browse files

- Convert dataset to Parquet (c07734174dae8da7962d9e7899d04278a6e0e209)
- Delete loading script (a642fa48bb836bd3a0b39b3a8d344badc966c854)

README.md CHANGED
@@ -30,13 +30,20 @@ dataset_info:
30
  dtype: string
31
  splits:
32
  - name: train
33
- num_bytes: 3191888
34
  num_examples: 31962
35
  - name: test
36
- num_bytes: 1711606
37
  num_examples: 17197
38
- download_size: 4738708
39
- dataset_size: 4903494
 
 
 
 
 
 
 
40
  train-eval-index:
41
  - config: default
42
  task: text-classification
 
30
  dtype: string
31
  splits:
32
  - name: train
33
+ num_bytes: 3191760
34
  num_examples: 31962
35
  - name: test
36
+ num_bytes: 1711534
37
  num_examples: 17197
38
+ download_size: 3180269
39
+ dataset_size: 4903294
40
+ configs:
41
+ - config_name: default
42
+ data_files:
43
+ - split: train
44
+ path: data/train-*
45
+ - split: test
46
+ path: data/test-*
47
  train-eval-index:
48
  - config: default
49
  task: text-classification
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e18ee8c6b8c4c721233f7c8a03a5e75534cca295f23add2f2a2a7487a8f1bcd
3
+ size 1112032
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dfd5df659f97f24d66059ca9f161ab177d1c544853d7f106e8e22858cdd801e5
3
+ size 2068237
tweets_hate_speech_detection.py DELETED
@@ -1,83 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """Detecting which tweets showcase hate or racist remarks."""
18
-
19
-
20
- import csv
21
-
22
- import datasets
23
- from datasets.tasks import TextClassification
24
-
25
-
26
- _DESCRIPTION = """\
27
- The objective of this task is to detect hate speech in tweets. For the sake of simplicity, we say a tweet contains hate speech if it has a racist or sexist sentiment associated with it. So, the task is to classify racist or sexist tweets from other tweets.
28
-
29
- Formally, given a training sample of tweets and labels, where label ‘1’ denotes the tweet is racist/sexist and label ‘0’ denotes the tweet is not racist/sexist, your objective is to predict the labels on the given test dataset.
30
- """
31
-
32
- _HOMEPAGE = "https://github.com/sharmaroshan/Twitter-Sentiment-Analysis"
33
-
34
- _CITATION = """\
35
- @InProceedings{Z
36
- Roshan Sharma:dataset,
37
- title = {Sentimental Analysis of Tweets for Detecting Hate/Racist Speeches},
38
- authors={Roshan Sharma},
39
- year={2018}
40
- }
41
- """
42
-
43
- _URL = {
44
- "train": "https://raw.githubusercontent.com/sharmaroshan/Twitter-Sentiment-Analysis/master/train_tweet.csv",
45
- "test": "https://raw.githubusercontent.com/sharmaroshan/Twitter-Sentiment-Analysis/master/test_tweets.csv",
46
- }
47
-
48
-
49
class TweetsHateSpeechDetection(datasets.GeneratorBasedBuilder):
    """Detecting which tweets showcase hate or racist remarks."""

    def _info(self):
        """Return the dataset metadata (features, homepage, citation, task template)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # 0 = "no-hate-speech", 1 = "hate-speech"; unlabeled rows
                    # are emitted as -1 by _generate_examples.
                    "label": datasets.ClassLabel(names=["no-hate-speech", "hate-speech"]),
                    "tweet": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            task_templates=[TextClassification(text_column="tweet", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        """Download the train/test CSVs and declare one generator per split."""
        path = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": path["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": path["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Generate Tweet examples.

        Yields ``(id, {"label": int, "tweet": str})`` pairs. The test CSV has
        no ``label`` column; such rows get label ``-1``.

        Fix: use ``row.get("label", -1)`` instead of the original
        ``row.setdefault("label", -1)`` — ``setdefault`` mutated the parsed
        row dict as a side effect of merely reading a default value.
        """
        with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.DictReader(
                csv_file, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
            )
            for id_, row in enumerate(csv_reader):
                yield id_, {
                    "label": int(row.get("label", -1)),
                    "tweet": row["tweet"],
                }