Datasets:

Sub-tasks:
extractive-qa
Languages:
Persian
Multilinguality:
monolingual
Size Categories:
1K<n<10K
Language Creators:
expert-generated
Annotations Creators:
expert-generated
ArXiv:
Tags:
License:
mariosasko hojjat-m committed on
Commit
1ecef50
1 Parent(s): afd64db

Update test split sample size (#2)

Browse files

- Update test split sample size (e8d0f2bee7a3f76a11e592f41d96813c91683388)
- Delete dataset_infos.json (2366c3d02dd5b4f21445f29565a4e9ff347d3ec5)


Co-authored-by: Hojjat Mokhtarabadi <hojjat-m@users.noreply.huggingface.co>

Files changed (2) hide show
  1. README.md +5 -5
  2. dataset_infos.json +0 -1
README.md CHANGED
@@ -39,13 +39,13 @@ dataset_info:
39
  num_bytes: 747679
40
  num_examples: 600
41
  - name: test
42
- num_bytes: 681945
43
- num_examples: 575
44
  - name: validation
45
- num_bytes: 163185
46
  num_examples: 125
47
- download_size: 4117863
48
- dataset_size: 1592809
49
  ---
50
 
51
  # Dataset Card for PersiNLU (Reading Comprehension)
39
  num_bytes: 747679
40
  num_examples: 600
41
  - name: test
42
+ num_bytes: 674711
43
+ num_examples: 570
44
  - name: validation
45
+ num_bytes: 163161
46
  num_examples: 125
47
+ download_size: 4105495
48
+ dataset_size: 1585527
49
  ---
50
 
51
  # Dataset Card for PersiNLU (Reading Comprehension)
dataset_infos.json DELETED
@@ -1 +0,0 @@
1
- {"parsinlu-repo": {"description": "A Persian reading comprehenion task (generating an answer, given a question and a context paragraph). \nThe questions are mined using Google auto-complete, their answers and the corresponding evidence documents are manually annotated by native speakers. \n", "citation": "@article{huggingface:dataset,\n title = {ParsiNLU: A Suite of Language Understanding Challenges for Persian},\n authors = {Khashabi, Daniel and Cohan, Arman and Shakeri, Siamak and Hosseini, Pedram and Pezeshkpour, Pouya and Alikhani, Malihe and Aminnaseri, Moin and Bitaab, Marzieh and Brahman, Faeze and Ghazarian, Sarik and others},\n year={2020}\n journal = {arXiv e-prints},\n eprint = {2012.06154}, \n}\n", "homepage": "https://github.com/persiannlp/parsinlu/", "license": "CC BY-NC-SA 4.0", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "answer_text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "parsinlu_reading_comprehension", "config_name": "parsinlu-repo", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 747679, "num_examples": 600, "dataset_name": "parsinlu_reading_comprehension"}, "test": {"name": "test", "num_bytes": 681945, "num_examples": 575, "dataset_name": "parsinlu_reading_comprehension"}, "validation": {"name": "validation", "num_bytes": 163185, "num_examples": 125, "dataset_name": "parsinlu_reading_comprehension"}}, "download_checksums": {"https://raw.githubusercontent.com/persiannlp/parsinlu/master/data/reading_comprehension/train.jsonl": {"num_bytes": 1933004, "checksum": 
"488fa21f303d880b82b8ba590e0c5a5b61dfb1442a96aa2db19f487a16f5e480"}, "https://raw.githubusercontent.com/persiannlp/parsinlu/master/data/reading_comprehension/dev.jsonl": {"num_bytes": 424640, "checksum": "6ce2aed6d8ace6ed7f9ef4db9baba3b5efdfa9f99d605dccb494ce39cd63c9c6"}, "https://raw.githubusercontent.com/persiannlp/parsinlu/master/data/reading_comprehension/eval.jsonl": {"num_bytes": 1760219, "checksum": "95ac9cec4548cb35a5b7b2d85dabbd89fe0e724245935fdeeaddea3c07e644fe"}}, "download_size": 4117863, "post_processing_size": null, "dataset_size": 1592809, "size_in_bytes": 5710672}}