system HF staff committed on
Commit
65a9bca
1 Parent(s): 0923ea5

Update files from the datasets library (from 1.8.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.8.0

Files changed (2) hide show
  1. dataset_infos.json +1 -1
  2. imdb_urdu_reviews.py +2 -0
dataset_infos.json CHANGED
@@ -1 +1 @@
1
- {"default": {"description": "\nLarge Movie translated Urdu Reviews Dataset.\nThis is a dataset for binary sentiment classification containing substantially more data than previous\nbenchmark datasets. We provide a set of 40,000 highly polar movie reviews for training, and 10,000 for testing.\nTo increase the availability of sentiment analysis dataset for a low recourse language like Urdu,\nwe opted to use the already available IMDB Dataset. we have translated this dataset using google translator.\nThis is a binary classification dataset having two classes as positive and negative.\nThe reason behind using this dataset is high polarity for each class.\nIt contains 50k samples equally divided in two classes.\n", "citation": "\n@InProceedings{maas-EtAl:2011:ACL-HLT2011,\n author = {Maas, Andrew L. and Daly,nRaymond E. and Pham, Peter T. and Huang, Dan and Ng, Andrew Y...},\n title = {Learning Word Vectors for Sentiment Analysis},\n month = {June},\n year = {2011},\n address = {Portland, Oregon, USA},\n publisher = {Association for Computational Linguistics},\n pages = {142--150},\n url = {http://www.aclweb.org/anthology/P11-1015}\n}\n", "homepage": "https://github.com/mirfan899/Urdu", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "sentiment": {"num_classes": 2, "names": ["positive", "negative"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "imdb", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 114670811, "num_examples": 50000, "dataset_name": "imdb"}}, "download_checksums": {"https://github.com/mirfan899/Urdu/blob/master/sentiment/imdb_urdu_reviews.csv.tar.gz?raw=true": {"num_bytes": 31510992, "checksum": "f60f7e9972661dc5d8ec1c867972ae35f86dac32de43a274a2a794095dccdf99"}}, "download_size": 31510992, "post_processing_size": 
null, "dataset_size": 114670811, "size_in_bytes": 146181803}}
 
1
+ {"default": {"description": "\nLarge Movie translated Urdu Reviews Dataset.\nThis is a dataset for binary sentiment classification containing substantially more data than previous\nbenchmark datasets. We provide a set of 40,000 highly polar movie reviews for training, and 10,000 for testing.\nTo increase the availability of sentiment analysis dataset for a low recourse language like Urdu,\nwe opted to use the already available IMDB Dataset. we have translated this dataset using google translator.\nThis is a binary classification dataset having two classes as positive and negative.\nThe reason behind using this dataset is high polarity for each class.\nIt contains 50k samples equally divided in two classes.\n", "citation": "\n@InProceedings{maas-EtAl:2011:ACL-HLT2011,\n author = {Maas, Andrew L. and Daly,nRaymond E. and Pham, Peter T. and Huang, Dan and Ng, Andrew Y...},\n title = {Learning Word Vectors for Sentiment Analysis},\n month = {June},\n year = {2011},\n address = {Portland, Oregon, USA},\n publisher = {Association for Computational Linguistics},\n pages = {142--150},\n url = {http://www.aclweb.org/anthology/P11-1015}\n}\n", "homepage": "https://github.com/mirfan899/Urdu", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "sentiment": {"num_classes": 2, "names": ["positive", "negative"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "text-classification", "text_column": "sentence", "label_column": "sentiment", "labels": ["negative", "positive"]}], "builder_name": "imdb_urdu_reviews", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 114670811, "num_examples": 50000, "dataset_name": "imdb_urdu_reviews"}}, "download_checksums": 
{"https://github.com/mirfan899/Urdu/blob/master/sentiment/imdb_urdu_reviews.csv.tar.gz?raw=true": {"num_bytes": 31510992, "checksum": "f60f7e9972661dc5d8ec1c867972ae35f86dac32de43a274a2a794095dccdf99"}}, "download_size": 31510992, "post_processing_size": null, "dataset_size": 114670811, "size_in_bytes": 146181803}}
imdb_urdu_reviews.py CHANGED
@@ -5,6 +5,7 @@ import csv
5
  import os
6
 
7
  import datasets
 
8
 
9
 
10
  _CITATION = """
@@ -50,6 +51,7 @@ class ImdbUrduReviews(datasets.GeneratorBasedBuilder):
50
  ),
51
  citation=_CITATION,
52
  homepage=_HOMEPAGE,
 
53
  )
54
 
55
  def _split_generators(self, dl_manager):
 
5
  import os
6
 
7
  import datasets
8
+ from datasets.tasks import TextClassification
9
 
10
 
11
  _CITATION = """
 
51
  ),
52
  citation=_CITATION,
53
  homepage=_HOMEPAGE,
54
+ task_templates=[TextClassification(text_column="sentence", label_column="sentiment")],
55
  )
56
 
57
  def _split_generators(self, dl_manager):