abdoelsayed committed
Commit 0a6cfbd
1 Parent(s): 7a0e90f

Upload ArabicaQA.py

Files changed (1)
  1. ArabicaQA.py +66 -0
ArabicaQA.py ADDED
@@ -0,0 +1,66 @@
+import json
+
+import datasets
+from datasets import DatasetInfo, GeneratorBasedBuilder
+
+
+class CustomSQuADFormatDataset(GeneratorBasedBuilder):
+    """A custom dataset similar to SQuAD but tailored for 'ArabicaQA' hosted on Hugging Face."""
+
+    VERSION = datasets.Version("1.0.0")
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(name="ArabicaQA", version=VERSION, description="Custom dataset similar to SQuAD format.")
+    ]
+
+    def _info(self):
+        return DatasetInfo(
+            description="This dataset is formatted similarly to the SQuAD dataset.",
+            features=datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "title": datasets.Value("string"),
+                    "context": datasets.Value("string"),
+                    "question": datasets.Value("string"),
+                    "answers": datasets.features.Sequence(
+                        {
+                            "text": datasets.Value("string"),
+                            "answer_start": datasets.Value("int32"),
+                        }
+                    ),
+                }
+            ),
+            supervised_keys=None,
+            homepage="https://huggingface.co/datasets/abdoelsayed/ArabicaQA",
+            citation="",
+        )
+
+    def _split_generators(self, dl_manager):
+        # Raw SQuAD-format JSON files for each split, hosted in the dataset repo.
+        urls_to_download = {
+            "train": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/train.json",
+            "dev": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/val.json",
+            "test": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/test.json",
+        }
+        downloaded_files = dl_manager.download(urls_to_download)
+
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+        ]
+
+    def _generate_examples(self, filepath):
+        # Walk the SQuAD-style nesting: article -> paragraphs -> question/answer pairs.
+        with open(filepath, encoding="utf-8") as f:
+            squad_data = json.load(f)["data"]
+        for article in squad_data:
+            title = article.get("title", "")
+            for paragraph in article["paragraphs"]:
+                context = paragraph["context"]
+                for qa in paragraph["qas"]:
+                    id_ = qa["id"]
+                    question = qa["question"]
+                    answers = [{"text": answer["text"], "answer_start": answer["answer_start"]} for answer in qa.get("answers", [])]
+
+                    yield id_, {
+                        "id": id_,
+                        "title": title,
+                        "context": context,
+                        "question": question,
+                        "answers": answers,
+                    }
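
A loading script like this is normally exercised through datasets.load_dataset. The snippet below is a minimal sketch, not part of the commit: it assumes the datasets library is installed, that the script above sits in the abdoelsayed/ArabicaQA repo (or a local copy of ArabicaQA.py is passed instead), and that trust_remote_code is accepted by the installed datasets version (required on recent releases before a custom script is executed).

import datasets

# "ArabicaQA" is the single BuilderConfig name declared above, so it could also
# be omitted; a local path such as "./ArabicaQA.py" works in place of the repo id.
arabica = datasets.load_dataset("abdoelsayed/ArabicaQA", "ArabicaQA", trust_remote_code=True)

print(arabica)              # DatasetDict with train / validation / test splits
print(arabica["train"][0])  # one example: id, title, context, question, answers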