albertvillanova (HF staff) committed
Commit a7bc399
1 Parent(s): 566be64

Convert dataset to Parquet (#3)

- Convert dataset to Parquet (06233c757e70673cf71249f47eea5166eb8d6288)
- Delete loading script (c5784feca89a65fd8742c09b860c04e8d51dc0c4)

Files changed (3)
  1. README.md +8 -3
  2. data/train-00000-of-00001.parquet +3 -0
  3. roman_urdu.py +0 -94
README.md CHANGED
@@ -32,10 +32,15 @@ dataset_info:
           '2': Neutral
   splits:
   - name: train
-    num_bytes: 1633423
+    num_bytes: 1633411
     num_examples: 20229
-  download_size: 1628349
-  dataset_size: 1633423
+  download_size: 1060033
+  dataset_size: 1633411
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
 ---
 
 # Dataset Card for Roman Urdu Dataset
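With the `configs` block added above, the `datasets` library resolves the default config straight to the Parquet files under `data/`, so no loading script runs at load time. A minimal sketch of loading the converted dataset, assuming the Hub repo id is `roman_urdu` (the repo id is not shown in this diff):

```python
from datasets import load_dataset

# The configs entry maps the default config to data/train-*, so this
# reads the Parquet shard directly instead of executing roman_urdu.py.
ds = load_dataset("roman_urdu", split="train")  # repo id assumed

print(ds.num_rows)  # 20229, matching num_examples in the updated card
print(ds.features)  # sentence (string), sentiment (ClassLabel)
```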
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:435cd809536b800ee0b92c0c0db344adc35fd0cf7916e9d058ae07880836bb1e
+size 1060033
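The added file is a Git LFS pointer: only the `sha256` digest and byte size are committed to git, while the 1,060,033-byte Parquet payload lives in LFS storage. A quick way to sanity-check the shard once the real file has been fetched, sketched with pyarrow (not part of this commit):

```python
import pyarrow.parquet as pq

# Inspect the materialized shard; an unfetched LFS pointer would fail to parse.
table = pq.read_table("data/train-00000-of-00001.parquet")

print(table.num_rows)  # expected: 20229
print(table.schema)    # sentence: string; sentiment stored as a class-label int
```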
roman_urdu.py DELETED
@@ -1,94 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Roman Urdu data corpus with 20,000 polarity labeled records"""
-
-
-import csv
-import os
-
-import datasets
-from datasets.tasks import TextClassification
-
-
-_CITATION = """\
-@InProceedings{Sharf:2018,
-  title = "Performing Natural Language Processing on Roman Urdu Datasets",
-  authors = "Zareen Sharf and Saif Ur Rahman",
-  booktitle = "International Journal of Computer Science and Network Security",
-  volume = "18",
-  number = "1",
-  pages = "141-148",
-  year = "2018"
-}
-
-@misc{Dua:2019,
-  author = "Dua, Dheeru and Graff, Casey",
-  year = "2017",
-  title = "{UCI} Machine Learning Repository",
-  url = "http://archive.ics.uci.edu/ml",
-  institution = "University of California, Irvine, School of Information and Computer Sciences"
-}
-"""
-
-_DESCRIPTION = """\
-This is an extensive compilation of Roman Urdu Dataset (Urdu written in Latin/Roman script) tagged for sentiment analysis.
-"""
-
-_HOMEPAGE = "https://archive.ics.uci.edu/ml/datasets/Roman+Urdu+Data+Set"
-
-_URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/00458/Roman%20Urdu%20DataSet.csv"
-
-
-class RomanUrdu(datasets.GeneratorBasedBuilder):
-    """Roman Urdu sentences gathered from reviews of various e-commerce websites, comments on public Facebook pages, and twitter accounts, with positive, neutral, and negative polarity labels per each row."""
-
-    VERSION = datasets.Version("1.1.0")
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                    "sentiment": datasets.features.ClassLabel(names=["Positive", "Negative", "Neutral"]),
-                }
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-            task_templates=[TextClassification(text_column="sentence", label_column="sentiment")],
-        )
-
-    def _split_generators(self, dl_manager):
-        data_dir = dl_manager.download_and_extract(_URL)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir),
-                    "split": "train",
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath, split):
-        with open(filepath, encoding="utf-8") as f:
-            reader = csv.reader(f, delimiter=",")
-            for id_, row in enumerate(reader):
-                yield id_, {
-                    "sentence": row[0],
-                    # 'Neative' typo in original dataset
-                    "sentiment": "Negative" if row[1] == "Neative" else row[1],
-                }
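With the script deleted, the UCI CSV download and the on-the-fly correction of the dataset's `Neative` label typo are baked into the Parquet shard at conversion time rather than re-applied on every load. A rough sketch of checking the two revisions against each other, again assuming the repo id `roman_urdu`; note that recent `datasets` releases restrict or drop script-based loading, so the old revision may need `trust_remote_code=True` or may not load at all:

```python
from datasets import load_dataset

# Parquet-backed loading, from this commit onward.
new = load_dataset("roman_urdu", split="train")

# Script-backed loading from the parent commit (566be64); depending on
# your datasets version this may require trust_remote_code=True.
old = load_dataset("roman_urdu", split="train", revision="566be64")

# The conversion should be content-preserving.
assert new.num_rows == old.num_rows == 20229
```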