albertvillanova (HF staff) committed
Commit
d2a0f44
1 Parent(s): cf18961

Convert dataset to Parquet

Convert dataset to Parquet.
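For context, a conversion like the one in this commit can be reproduced with the `datasets` library. The snippet below is a minimal sketch under assumptions (the output paths simply mirror the shard layout added here); it is not necessarily the exact tool that produced this commit.

# Minimal sketch: export each split of the plain_text config to one Parquet shard.
# Assumes the `datasets` library; output paths mirror plain_text/<split>-00000-of-00001.parquet.
from datasets import load_dataset

splits = load_dataset("arcd", "plain_text")  # train: 693 examples, validation: 702 examples

for split_name, split in splits.items():
    split.to_parquet(f"plain_text/{split_name}-00000-of-00001.parquet")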

README.md CHANGED
@@ -5,8 +5,6 @@ language_creators:
  - crowdsourced
  language:
  - ar
- language_bcp47:
- - ar-SA
  license:
  - mit
  multilinguality:
@@ -21,7 +19,10 @@ task_ids:
  - extractive-qa
  paperswithcode_id: arcd
  pretty_name: ARCD
+ language_bcp47:
+ - ar-SA
  dataset_info:
+   config_name: plain_text
    features:
    - name: id
      dtype: string
@@ -37,16 +38,23 @@ dataset_info:
      dtype: string
    - name: answer_start
      dtype: int32
-   config_name: plain_text
    splits:
    - name: train
-     num_bytes: 811064
+     num_bytes: 811036
      num_examples: 693
    - name: validation
-     num_bytes: 885648
+     num_bytes: 885620
      num_examples: 702
-   download_size: 1942399
-   dataset_size: 1696712
+   download_size: 365858
+   dataset_size: 1696656
+ configs:
+ - config_name: plain_text
+   data_files:
+   - split: train
+     path: plain_text/train-*
+   - split: validation
+     path: plain_text/validation-*
+   default: true
  ---

  # Dataset Card for "arcd"
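With the new `configs` mapping above, the `plain_text` config resolves straight to the Parquet shards instead of running the original loading script. A small usage sketch (assuming a `datasets` release recent enough to honor the YAML `configs` field, and the repository id `arcd`):

from datasets import load_dataset

# Resolves to plain_text/train-*.parquet and plain_text/validation-*.parquet via the README `configs` block.
ds = load_dataset("arcd", "plain_text")
print(ds["train"].num_rows, ds["validation"].num_rows)  # expected: 693 702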
dataset_infos.json CHANGED
@@ -1 +1,70 @@
- {"plain_text": {"description": " Arabic Reading Comprehension Dataset (ARCD) composed of 1,395 questions posed by crowdworkers on Wikipedia articles.\n", "citation": "@inproceedings{mozannar-etal-2019-neural,\n title = {Neural {A}rabic Question Answering},\n author = {Mozannar, Hussein and Maamary, Elie and El Hajal, Karl and Hajj, Hazem},\n booktitle = {Proceedings of the Fourth Arabic Natural Language Processing Workshop},\n month = {aug},\n year = {2019},\n address = {Florence, Italy},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W19-4612},\n doi = {10.18653/v1/W19-4612},\n pages = {108--118},\n abstract = {This paper tackles the problem of open domain factual Arabic question answering (QA) using Wikipedia as our knowledge source. This constrains the answer of any question to be a span of text in Wikipedia. Open domain QA for Arabic entails three challenges: annotated QA datasets in Arabic, large scale efficient information retrieval and machine reading comprehension. To deal with the lack of Arabic QA datasets we present the Arabic Reading Comprehension Dataset (ARCD) composed of 1,395 questions posed by crowdworkers on Wikipedia articles, and a machine translation of the Stanford Question Answering Dataset (Arabic-SQuAD). Our system for open domain question answering in Arabic (SOQAL) is based on two components: (1) a document retriever using a hierarchical TF-IDF approach and (2) a neural reading comprehension model using the pre-trained bi-directional transformer BERT. Our experiments on ARCD indicate the effectiveness of our approach with our BERT-based reader achieving a 61.3 F1 score, and our open domain system SOQAL achieving a 27.6 F1 score.}\n}\n", "homepage": "https://github.com/husseinmozannar/SOQAL/tree/master/data", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "answer_start": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "question-answering-extractive", "question_column": "question", "context_column": "context", "answers_column": "answers"}], "builder_name": "arcd", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 811064, "num_examples": 693, "dataset_name": "arcd"}, "validation": {"name": "validation", "num_bytes": 885648, "num_examples": 702, "dataset_name": "arcd"}}, "download_checksums": {"https://raw.githubusercontent.com/husseinmozannar/SOQAL/master/data/arcd-train.json": {"num_bytes": 939840, "checksum": "6a973fda9f0b066e0547a85a3396e7294fa917e24b6efd7ce430769033a6ce15"}, "https://raw.githubusercontent.com/husseinmozannar/SOQAL/master/data/arcd-test.json": {"num_bytes": 1002559, "checksum": "b4ba4fb4227841bbce71e01b3eaecb33e9f17a08cde1ec91e5bc335da2c75135"}}, "download_size": 1942399, "post_processing_size": null, "dataset_size": 1696712, "size_in_bytes": 3639111}}
+ {
+   "plain_text": {
+     "description": " Arabic Reading Comprehension Dataset (ARCD) composed of 1,395 questions posed by crowdworkers on Wikipedia articles.\n",
+     "citation": "@inproceedings{mozannar-etal-2019-neural,\n title = {Neural {A}rabic Question Answering},\n author = {Mozannar, Hussein and Maamary, Elie and El Hajal, Karl and Hajj, Hazem},\n booktitle = {Proceedings of the Fourth Arabic Natural Language Processing Workshop},\n month = {aug},\n year = {2019},\n address = {Florence, Italy},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W19-4612},\n doi = {10.18653/v1/W19-4612},\n pages = {108--118},\n abstract = {This paper tackles the problem of open domain factual Arabic question answering (QA) using Wikipedia as our knowledge source. This constrains the answer of any question to be a span of text in Wikipedia. Open domain QA for Arabic entails three challenges: annotated QA datasets in Arabic, large scale efficient information retrieval and machine reading comprehension. To deal with the lack of Arabic QA datasets we present the Arabic Reading Comprehension Dataset (ARCD) composed of 1,395 questions posed by crowdworkers on Wikipedia articles, and a machine translation of the Stanford Question Answering Dataset (Arabic-SQuAD). Our system for open domain question answering in Arabic (SOQAL) is based on two components: (1) a document retriever using a hierarchical TF-IDF approach and (2) a neural reading comprehension model using the pre-trained bi-directional transformer BERT. Our experiments on ARCD indicate the effectiveness of our approach with our BERT-based reader achieving a 61.3 F1 score, and our open domain system SOQAL achieving a 27.6 F1 score.}\n}\n",
+     "homepage": "https://github.com/husseinmozannar/SOQAL/tree/master/data",
+     "license": "",
+     "features": {
+       "id": {
+         "dtype": "string",
+         "_type": "Value"
+       },
+       "title": {
+         "dtype": "string",
+         "_type": "Value"
+       },
+       "context": {
+         "dtype": "string",
+         "_type": "Value"
+       },
+       "question": {
+         "dtype": "string",
+         "_type": "Value"
+       },
+       "answers": {
+         "feature": {
+           "text": {
+             "dtype": "string",
+             "_type": "Value"
+           },
+           "answer_start": {
+             "dtype": "int32",
+             "_type": "Value"
+           }
+         },
+         "_type": "Sequence"
+       }
+     },
+     "task_templates": [
+       {
+         "task": "question-answering-extractive"
+       }
+     ],
+     "builder_name": "parquet",
+     "dataset_name": "arcd",
+     "config_name": "plain_text",
+     "version": {
+       "version_str": "1.0.0",
+       "major": 1,
+       "minor": 0,
+       "patch": 0
+     },
+     "splits": {
+       "train": {
+         "name": "train",
+         "num_bytes": 811036,
+         "num_examples": 693,
+         "dataset_name": null
+       },
+       "validation": {
+         "name": "validation",
+         "num_bytes": 885620,
+         "num_examples": 702,
+         "dataset_name": null
+       }
+     },
+     "download_size": 365858,
+     "dataset_size": 1696656,
+     "size_in_bytes": 2062514
+   }
+ }
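The split metadata committed above can be cross-checked against what the builder reports for the Parquet-backed config. A sketch, assuming the repository id `arcd` and a local checkout containing dataset_infos.json:

import json

from datasets import load_dataset_builder

# Metadata reported by the builder for the Parquet config (assumes it is exposed without downloading the data).
info = load_dataset_builder("arcd", "plain_text").info

# Metadata committed in this repository.
with open("dataset_infos.json", encoding="utf-8") as f:
    committed = json.load(f)["plain_text"]

for split in ("train", "validation"):
    assert info.splits[split].num_examples == committed["splits"][split]["num_examples"]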
plain_text/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8167f99702ed6b2457ba74b044b2b04bce61e70ac3df5fe46c0386a31a307490
+ size 173521
plain_text/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a74775a55a6b13d8430c2127be32efeb5c463a201c864b876d6eec29067072f8
+ size 192337
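Both entries above are Git LFS pointer files; the actual Parquet payloads live in LFS storage. A verification sketch with `huggingface_hub` and `pyarrow` (the repo id `arcd` is assumed), checking a downloaded shard against the pointer's sha256 and the row count recorded in the metadata:

import hashlib

import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download

# Fetch the train shard from the dataset repository.
path = hf_hub_download(
    repo_id="arcd",
    filename="plain_text/train-00000-of-00001.parquet",
    repo_type="dataset",
)

# SHA-256 of the payload should equal the `oid sha256:...` value in the pointer file.
with open(path, "rb") as f:
    print(hashlib.sha256(f.read()).hexdigest())

# Row count should equal num_examples for the train split (693).
print(pq.read_table(path).num_rows)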