Datasets:

Multilinguality:
multilingual
Size Categories:
10K<n<100K
Language Creators:
crowdsourced
Annotations Creators:
expert-generated
Source Datasets:
original
Tags:
License:
albertvillanova HF staff committed on
Commit
2f417fc
1 Parent(s): 8e2eb8b

Host data file (#4)

Browse files

- Host data file (6be373e653ef26d1de37d870244c415f90b68d07)
- Update loading script (8784c14c2f9d9f111e90aea68f1c2d309d1ad0f1)
- Delete legacy dataset_infos.json (3736a096c3e8a39ae4e807c1692d9ac9f878d77e)

Files changed (3) hide show
  1. data/tamil.zip +3 -0
  2. dataset_infos.json +0 -1
  3. tamilmixsentiment.py +15 -41
data/tamil.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:755617f136b09f9e3512cd65a56d290240a6a4ab63601f3bcc985138bc52103d
3
+ size 423050
dataset_infos.json DELETED
@@ -1 +0,0 @@
1
- {"default": {"description": "The first gold standard Tamil-English code-switched, sentiment-annotated corpus containing 15,744 comment posts from YouTube. Train: 11,335 Validation: 1,260 and Test: 3,149. This makes the largest general domain sentiment dataset for this relatively low-resource language with code-mixing phenomenon. The dataset contains all the three types of code-mixed sentences - Inter-Sentential switch, Intra-Sentential switch and Tag switching. Most comments were written in Roman script with either Tamil grammar with English lexicon or English grammar with Tamil lexicon. Some comments were written in Tamil script with English expressions in between.\n", "citation": "@inproceedings{chakravarthi-etal-2020-corpus,\n title = \"Corpus Creation for Sentiment Analysis in Code-Mixed {T}amil-{E}nglish Text\",\n author = \"Chakravarthi, Bharathi Raja and\n Muralidaran, Vigneshwaran and\n Priyadharshini, Ruba and\n McCrae, John Philip\",\n booktitle = \"Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)\",\n month = may,\n year = \"2020\",\n address = \"Marseille, France\",\n publisher = \"European Language Resources association\",\n url = \"https://www.aclweb.org/anthology/2020.sltu-1.28\",\n pages = \"202--210\",\n abstract = \"Understanding the sentiment of a comment from a video or an image is an essential task in many applications. Sentiment analysis of a text can be useful for various decision-making processes. One such application is to analyse the popular sentiments of videos on social media based on viewer comments. However, comments from social media do not follow strict rules of grammar, and they contain mixing of more than one language, often written in non-native scripts. Non-availability of annotated code-mixed data for a low-resourced language like Tamil also adds difficulty to this problem. 
To overcome this, we created a gold standard Tamil-English code-switched, sentiment-annotated corpus containing 15,744 comment posts from YouTube. In this paper, we describe the process of creating the corpus and assigning polarities. We present inter-annotator agreement and show the results of sentiment analysis trained on this corpus as a benchmark.\",\n language = \"English\",\n ISBN = \"979-10-95546-35-1\",\n}\n", "homepage": "https://dravidian-codemix.github.io/2020/datasets.html", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 5, "names": ["Positive", "Negative", "Mixed_feelings", "unknown_state", "not-Tamil"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "text-classification", "text_column": "text", "label_column": "label", "labels": ["Mixed_feelings", "Negative", "Positive", "not-Tamil", "unknown_state"]}], "builder_name": "tamilmixsentiment", "config_name": "default", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 790132, "num_examples": 11335, "dataset_name": "tamilmixsentiment"}, "validation": {"name": "validation", "num_bytes": 89618, "num_examples": 1260, "dataset_name": "tamilmixsentiment"}, "test": {"name": "test", "num_bytes": 218764, "num_examples": 3149, "dataset_name": "tamilmixsentiment"}}, "download_checksums": {"https://drive.google.com/u/0/uc?id=1hDHeoFIfQzJec1NgZNXh3CTNbchiIvuG&export=download": {"num_bytes": 801261, "checksum": "4be61bf29cb68e83d682d33898bf41b2240d960f85e2ccec50e6c4fb8e24e243"}, "https://drive.google.com/u/0/uc?id=1lyCxkDwZyvKLaF5NZhS3JE2ftMmHAKBT&export=download": {"num_bytes": 90843, "checksum": "d4bc37c4d0ccbca7079ea5f5c0c6d3ef0fcca3a084be7af28ec052bc5cdfe5f2"}, "https://docs.google.com/spreadsheets/d/1_6HJWkOuD76pQ6M_RVoP4FOsUK22zPl1IMpq1atLhl4/export?format=tsv": 
{"num_bytes": 258688, "checksum": "8047bb44125bbc926d38aafea01e2b342bdbd317b53baee8c4dccaeae58021e1"}}, "download_size": 1150792, "post_processing_size": null, "dataset_size": 1098514, "size_in_bytes": 2249306}}
 
tamilmixsentiment.py CHANGED
@@ -16,6 +16,7 @@
16
 
17
 
18
  import csv
 
19
 
20
  import datasets
21
  from datasets.tasks import TextClassification
@@ -47,11 +48,12 @@ The first gold standard Tamil-English code-switched, sentiment-annotated corpus
47
 
48
  _LICENSE = ""
49
 
50
- _TRAIN_DOWNLOAD_URL = "https://drive.google.com/u/0/uc?id=1hDHeoFIfQzJec1NgZNXh3CTNbchiIvuG&export=download"
51
- _VALIDATION_DOWNLOAD_URL = "https://drive.google.com/u/0/uc?id=1lyCxkDwZyvKLaF5NZhS3JE2ftMmHAKBT&export=download"
52
- _TEST_DOWNLOAD_URL = (
53
- "https://docs.google.com/spreadsheets/d/1_6HJWkOuD76pQ6M_RVoP4FOsUK22zPl1IMpq1atLhl4/export?format=tsv"
54
- )
 
55
 
56
 
57
  class Tamilmixsentiment(datasets.GeneratorBasedBuilder):
@@ -74,50 +76,22 @@ class Tamilmixsentiment(datasets.GeneratorBasedBuilder):
74
  )
75
 
76
  def _split_generators(self, dl_manager):
77
- train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
78
- validation_path = dl_manager.download_and_extract(_VALIDATION_DOWNLOAD_URL)
79
- test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
80
  return [
81
  datasets.SplitGenerator(
82
- name=datasets.Split.TRAIN,
83
- gen_kwargs={
84
- "filepath": train_path,
85
- "split": "train",
86
- },
87
- ),
88
- datasets.SplitGenerator(
89
- name=datasets.Split.VALIDATION,
90
  gen_kwargs={
91
- "filepath": validation_path,
92
- "split": "validation",
93
  },
94
- ),
95
- datasets.SplitGenerator(
96
- name=datasets.Split.TEST,
97
- gen_kwargs={
98
- "filepath": test_path,
99
- "split": "test",
100
- },
101
- ),
102
  ]
103
 
104
- def _generate_examples(self, filepath, split):
105
  """Generate Tamilmixsentiment examples."""
106
  with open(filepath, encoding="utf-8") as csv_file:
107
- csv_reader = csv.reader(
108
  csv_file, quotechar='"', delimiter="\t", quoting=csv.QUOTE_ALL, skipinitialspace=True
109
  )
110
- # skip header
111
- next(csv_reader)
112
-
113
  for id_, row in enumerate(csv_reader):
114
-
115
- # test - has a first column indicating sentence number
116
- if split == "test":
117
- idcol, text, label = row
118
-
119
- # train, validation
120
- else:
121
- text, label = row
122
-
123
- yield id_, {"text": text, "label": label}
16
 
17
 
18
  import csv
19
+ import os.path
20
 
21
  import datasets
22
  from datasets.tasks import TextClassification
48
 
49
  _LICENSE = ""
50
 
51
+ _URL = "data/tamil.zip"
52
+ _FILENAMES = {
53
+ "train": "tamil_train.tsv",
54
+ "validation": "tamil_dev.tsv",
55
+ "test": "tamil_test.tsv",
56
+ }
57
 
58
 
59
  class Tamilmixsentiment(datasets.GeneratorBasedBuilder):
76
  )
77
 
78
  def _split_generators(self, dl_manager):
79
+ data_dir = dl_manager.download_and_extract(_URL)
 
 
80
  return [
81
  datasets.SplitGenerator(
82
+ name=split,
 
 
 
 
 
 
 
83
  gen_kwargs={
84
+ "filepath": os.path.join(data_dir, _FILENAMES[split]),
 
85
  },
86
+ )
87
+ for split in (datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST)
 
 
 
 
 
 
88
  ]
89
 
90
+ def _generate_examples(self, filepath):
91
  """Generate Tamilmixsentiment examples."""
92
  with open(filepath, encoding="utf-8") as csv_file:
93
+ csv_reader = csv.DictReader(
94
  csv_file, quotechar='"', delimiter="\t", quoting=csv.QUOTE_ALL, skipinitialspace=True
95
  )
 
 
 
96
  for id_, row in enumerate(csv_reader):
97
+ yield id_, {"text": row["text"], "label": row["category"]}