abdoelsayed committed
Commit fc8be7f
Parent: a9299b3
Files changed (2)
  1. ArabicaQA.py +0 -66
  2. arabicaqa.py +0 -85
ArabicaQA.py DELETED
@@ -1,66 +0,0 @@
- import json
-
- import datasets
-
-
- class CustomSQuADFormatDataset(datasets.GeneratorBasedBuilder):
-     """A custom dataset similar to SQuAD but tailored for 'ArabicaQA' hosted on Hugging Face."""
-
-     VERSION = datasets.Version("1.0.0")
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="ArabicaQA", version=VERSION, description="Custom dataset similar to SQuAD format.")
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description="This dataset is formatted similarly to the SQuAD dataset.",
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "title": datasets.Value("string"),
-                     "context": datasets.Value("string"),
-                     "question": datasets.Value("string"),
-                     "answers": datasets.features.Sequence(
-                         {
-                             "text": datasets.Value("string"),
-                             "answer_start": datasets.Value("int32"),
-                         }
-                     ),
-                 }
-             ),
-             supervised_keys=None,
-             homepage="https://huggingface.co/datasets/abdoelsayed/ArabicaQA",
-             citation="",
-         )
-
-     def _split_generators(self, dl_manager):
-         # Download the three MRC splits hosted alongside the dataset.
-         urls_to_download = {
-             "train": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/train.json",
-             "dev": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/val.json",
-             "test": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/test.json",
-         }
-         downloaded_files = dl_manager.download(urls_to_download)
-
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
-         ]
-
-     def _generate_examples(self, filepath):
-         # Walk the SQuAD-style JSON: article -> paragraph -> question/answers.
-         with open(filepath, encoding="utf-8") as f:
-             squad_data = json.load(f)["data"]
-         for article in squad_data:
-             title = article.get("title", "")
-             for paragraph in article["paragraphs"]:
-                 context = paragraph["context"]
-                 for qa in paragraph["qas"]:
-                     id_ = qa["id"]
-                     question = qa["question"]
-                     answers = [{"text": answer["text"], "answer_start": answer["answer_start"]} for answer in qa.get("answers", [])]
-
-                     yield id_, {
-                         "id": id_,
-                         "title": title,
-                         "context": context,
-                         "question": question,
-                         "answers": answers,
-                     }
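For context, a loader script like the one above is normally consumed through datasets.load_dataset. The snippet below is a minimal usage sketch, assuming the script is resolved from the abdoelsayed/ArabicaQA repository named in its homepage; on recent releases of the datasets library, trust_remote_code=True is required before a repository script will run.

from datasets import load_dataset

# Sketch: load the MRC splits produced by the loader above.
# "abdoelsayed/ArabicaQA" comes from the homepage URL in _info();
# trust_remote_code is needed on newer datasets versions for repo scripts.
arabica = load_dataset("abdoelsayed/ArabicaQA", trust_remote_code=True)
print(arabica["train"][0]["question"])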
arabicaqa.py DELETED
@@ -1,85 +0,0 @@
- import csv
- import json
-
- import datasets
-
-
- class CustomSQuADFormatDataset(datasets.GeneratorBasedBuilder):
-     """A custom dataset similar to SQuAD but tailored for 'ArabicaQA' hosted on Hugging Face."""
-
-     VERSION = datasets.Version("1.0.0")
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name="ArabicaQA",
-             version=VERSION,
-             description="Custom dataset similar to SQuAD format, including CSV data.",
-         )
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description="This dataset is formatted similarly to the SQuAD dataset.",
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "title": datasets.Value("string"),
-                     "context": datasets.Value("string"),
-                     "question": datasets.Value("string"),
-                     "answers": datasets.features.Sequence(
-                         {
-                             "text": datasets.Value("string"),
-                             "answer_start": datasets.Value("int32"),
-                         }
-                     ),
-                     # Additional fields from the CSV can be declared here if needed.
-                 }
-             ),
-             supervised_keys=None,
-             homepage="https://huggingface.co/datasets/abdoelsayed/ArabicaQA",
-             citation="",
-         )
-
-     def _split_generators(self, dl_manager):
-         # Download the three MRC splits plus the shared CSV metadata file.
-         urls_to_download = {
-             "train": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/train.json",
-             "dev": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/val.json",
-             "test": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/test.json",
-             "csv": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/all_data_meta.csv",
-         }
-         downloaded_files = dl_manager.download(urls_to_download)
-
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"json_filepath": downloaded_files["train"], "csv_filepath": downloaded_files["csv"]}),
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"json_filepath": downloaded_files["dev"], "csv_filepath": downloaded_files["csv"]}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"json_filepath": downloaded_files["test"], "csv_filepath": downloaded_files["csv"]}),
-         ]
-
-     def _generate_examples(self, json_filepath, csv_filepath):
-         # Read the CSV metadata once and index it by question id.
-         csv_data = {}
-         with open(csv_filepath, encoding="utf-8") as csv_file:
-             csv_reader = csv.DictReader(csv_file)
-             for row in csv_reader:
-                 csv_data[row["question_id"]] = row
-
-         # Read the JSON file and yield one example per question.
-         with open(json_filepath, encoding="utf-8") as f:
-             squad_data = json.load(f)["data"]
-         for article in squad_data:
-             title = article.get("title", "")
-             for paragraph in article["paragraphs"]:
-                 context = paragraph["context"]
-                 for qa in paragraph["qas"]:
-                     id_ = qa["id"]
-                     question = qa["question"]
-                     answers = [{"text": answer["text"], "answer_start": answer["answer_start"]} for answer in qa.get("answers", [])]
-
-                     # Metadata from the CSV for this question, if present.
-                     additional_csv_data = csv_data.get(id_, {})
-                     example = {
-                         "id": id_,
-                         "title": title,
-                         "context": context,
-                         "question": question,
-                         "answers": answers,
-                         # Copy fields from additional_csv_data here once they
-                         # are also declared in _info().
-                     }
-                     yield id_, example
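The generator above indexes all_data_meta.csv by question_id but stops short of copying any of its columns into the yielded examples. The snippet below sketches that merge pattern in isolation; the "source" column and the sample rows are hypothetical stand-ins for whatever metadata the CSV actually provides (only question_id is assumed from the script).

import csv
import io

# Sketch of the CSV-merge pattern: index metadata rows by question_id,
# then fold the matching row into each generated example.
sample_csv = "question_id,source\nq-001,wikipedia\n"  # illustrative data only
csv_data = {row["question_id"]: row for row in csv.DictReader(io.StringIO(sample_csv))}

qa_example = {"id": "q-001", "question": "...", "answers": []}
merged = {**qa_example, **csv_data.get(qa_example["id"], {})}
print(merged)  # the extra "source" field now rides along with the QA record

Any column merged this way would also need to be declared in the Features dict returned by _info(); otherwise the builder rejects examples whose keys do not match the declared schema.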