Convert dataset to Parquet

#5
opened by albertvillanova (HF Staff)
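This PR replaces the ZIP-packaged TSV files and the Python loading script with pre-built Parquet shards. The diff does not show how the Parquet files were produced; the following is only a minimal sketch of an equivalent conversion, assuming the Hub repo id `tamilmixsentiment`, a `datasets` version that can still run the old loading script, and filenames matching the shards added below.

```python
from datasets import load_dataset

# Load all splits with the old loading script (i.e. from a revision that still
# contains tamilmixsentiment.py and data/tamil.zip). Repo id is an assumption.
ds = load_dataset("tamilmixsentiment")

# Write each split to a single Parquet shard, mirroring the files added in this PR.
for split, filename in [
    ("train", "data/train-00000-of-00001.parquet"),
    ("validation", "data/validation-00000-of-00001.parquet"),
    ("test", "data/test-00000-of-00001.parquet"),
]:
    ds[split].to_parquet(filename)
```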
README.md CHANGED
@@ -34,16 +34,25 @@ dataset_info:
           '4': not-Tamil
   splits:
   - name: train
-    num_bytes: 790132
+    num_bytes: 790124
     num_examples: 11335
   - name: validation
-    num_bytes: 89618
+    num_bytes: 89614
     num_examples: 1260
   - name: test
-    num_bytes: 218764
+    num_bytes: 218760
     num_examples: 3149
-  download_size: 1150792
-  dataset_size: 1098514
+  download_size: 708889
+  dataset_size: 1098498
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: validation
+    path: data/validation-*
+  - split: test
+    path: data/test-*
 ---
 
 # Dataset Card for Tamilmixsentiment
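The `configs` block added above is what lets the `datasets` library map each split directly to its Parquet shard, so no custom loading code runs at all. A minimal usage sketch, assuming the Hub repo id is `tamilmixsentiment`:

```python
from datasets import load_dataset

# After this PR, the splits resolve to the Parquet shards listed under
# `configs.data_files` in the README metadata; no loading script is executed.
ds = load_dataset("tamilmixsentiment")
print(ds["train"][0])        # {'text': ..., 'label': ...}
print(ds["train"].features)  # 'label' is a ClassLabel with 5 names
```

The `path: data/train-*` globs match the sharded filenames added in this PR, so any additional shards would be picked up automatically.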
data/{tamil.zip → test-00000-of-00001.parquet} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:755617f136b09f9e3512cd65a56d290240a6a4ab63601f3bcc985138bc52103d
-size 423050
+oid sha256:9e917a0b139a00ecb65a78b45cd572c0cb4ab271591dd73ed3cadc856372698c
+size 141137
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42cf1c94285e9e086938c878b2ada491024e73bc6894cf602670086d6cc80b23
+size 507494
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9b1f8d99e1194c527673580013bcc07f5e32acb7d71269935ba4226dde0f259
+size 60258
tamilmixsentiment.py DELETED
@@ -1,97 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Code-Mixed Tamil-English Text for Sentiment Analysis"""
-
-
-import csv
-import os.path
-
-import datasets
-from datasets.tasks import TextClassification
-
-
-_CITATION = """\
-@inproceedings{chakravarthi-etal-2020-corpus,
-    title = "Corpus Creation for Sentiment Analysis in Code-Mixed {T}amil-{E}nglish Text",
-    author = "Chakravarthi, Bharathi Raja and
-      Muralidaran, Vigneshwaran and
-      Priyadharshini, Ruba and
-      McCrae, John Philip",
-    booktitle = "Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)",
-    month = may,
-    year = "2020",
-    address = "Marseille, France",
-    publisher = "European Language Resources association",
-    url = "https://www.aclweb.org/anthology/2020.sltu-1.28",
-    pages = "202--210",
-    abstract = "Understanding the sentiment of a comment from a video or an image is an essential task in many applications. Sentiment analysis of a text can be useful for various decision-making processes. One such application is to analyse the popular sentiments of videos on social media based on viewer comments. However, comments from social media do not follow strict rules of grammar, and they contain mixing of more than one language, often written in non-native scripts. Non-availability of annotated code-mixed data for a low-resourced language like Tamil also adds difficulty to this problem. To overcome this, we created a gold standard Tamil-English code-switched, sentiment-annotated corpus containing 15,744 comment posts from YouTube. In this paper, we describe the process of creating the corpus and assigning polarities. We present inter-annotator agreement and show the results of sentiment analysis trained on this corpus as a benchmark.",
-    language = "English",
-    ISBN = "979-10-95546-35-1",
-}
-"""
-
-_DESCRIPTION = """\
-The first gold standard Tamil-English code-switched, sentiment-annotated corpus containing 15,744 comment posts from YouTube. Train: 11,335 Validation: 1,260 and Test: 3,149. This makes the largest general domain sentiment dataset for this relatively low-resource language with code-mixing phenomenon. The dataset contains all the three types of code-mixed sentences - Inter-Sentential switch, Intra-Sentential switch and Tag switching. Most comments were written in Roman script with either Tamil grammar with English lexicon or English grammar with Tamil lexicon. Some comments were written in Tamil script with English expressions in between.
-"""
-
-_LICENSE = ""
-
-_URL = "data/tamil.zip"
-_FILENAMES = {
-    "train": "tamil_train.tsv",
-    "validation": "tamil_dev.tsv",
-    "test": "tamil_test.tsv",
-}
-
-
-class Tamilmixsentiment(datasets.GeneratorBasedBuilder):
-    """Tamilmixsentiment sentiment analysis dataset."""
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "text": datasets.Value("string"),
-                    "label": datasets.features.ClassLabel(
-                        names=["Positive", "Negative", "Mixed_feelings", "unknown_state", "not-Tamil"]
-                    ),
-                }
-            ),
-            homepage="https://dravidian-codemix.github.io/2020/datasets.html",
-            citation=_CITATION,
-            task_templates=[TextClassification(text_column="text", label_column="label")],
-        )
-
-    def _split_generators(self, dl_manager):
-        data_dir = dl_manager.download_and_extract(_URL)
-        return [
-            datasets.SplitGenerator(
-                name=split,
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, _FILENAMES[split]),
-                },
-            )
-            for split in (datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST)
-        ]
-
-    def _generate_examples(self, filepath):
-        """Generate Tamilmixsentiment examples."""
-        with open(filepath, encoding="utf-8") as csv_file:
-            csv_reader = csv.DictReader(
-                csv_file, quotechar='"', delimiter="\t", quoting=csv.QUOTE_ALL, skipinitialspace=True
-            )
-            for id_, row in enumerate(csv_reader):
-                yield id_, {"text": row["text"], "label": row["category"]}
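With the loading script removed, the data can also be read without the `datasets` library at all, since Parquet is a standard columnar format. A small sketch using pandas, assuming a local clone of the repo with the LFS files pulled and `pyarrow` installed:

```python
import pandas as pd

# Read the Parquet shards added in this PR directly from a local clone.
splits = {
    "train": "data/train-00000-of-00001.parquet",
    "validation": "data/validation-00000-of-00001.parquet",
    "test": "data/test-00000-of-00001.parquet",
}
frames = {name: pd.read_parquet(path) for name, path in splits.items()}

# Row counts should match the README metadata: 11335 / 1260 / 3149.
# Note that 'label' is stored as integer ClassLabel indices, not strings.
for name, df in frames.items():
    print(name, len(df))
```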