abdoelsayed committed
Commit a9299b3
1 Parent(s): 67cb29d
Files changed (2)
  1. MRC/ArabicaQA.py +30 -10
  2. arabicaqa.py +30 -11
MRC/ArabicaQA.py CHANGED
@@ -1,3 +1,5 @@
+import csv
+import json
 from datasets import load_dataset_builder, DatasetInfo, DownloadConfig, GeneratorBasedBuilder, datasets
 
 class CustomSQuADFormatDataset(GeneratorBasedBuilder):
@@ -5,7 +7,11 @@ class CustomSQuADFormatDataset(GeneratorBasedBuilder):
 
     VERSION = datasets.Version("1.0.0")
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="ArabicaQA", version=VERSION, description="Custom dataset similar to SQuAD format.")
+        datasets.BuilderConfig(
+            name="ArabicaQA",
+            version=VERSION,
+            description="Custom dataset similar to SQuAD format, including CSV data."
+        )
     ]
 
     def _info(self):
@@ -23,6 +29,7 @@ class CustomSQuADFormatDataset(GeneratorBasedBuilder):
                             "answer_start": datasets.Value("int32"),
                         }
                     ),
+                    # Additional fields from the CSV can be added here if needed
                 }
             ),
             supervised_keys=None,
@@ -34,19 +41,27 @@ class CustomSQuADFormatDataset(GeneratorBasedBuilder):
         urls_to_download = {
             "train": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/train.json",
             "dev": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/val.json",
-            "test": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/test.json"
+            "test": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/test.json",
+            "csv": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/all_data_meta.csv"
         }
         downloaded_files = dl_manager.download(urls_to_download)
 
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["test"]}),
-
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"json_filepath": downloaded_files["train"], "csv_filepath": downloaded_files["csv"]}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"json_filepath": downloaded_files["dev"], "csv_filepath": downloaded_files["csv"]}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"json_filepath": downloaded_files["test"], "csv_filepath": downloaded_files["csv"]}),
         ]
 
-    def _generate_examples(self, filepath):
-        with open(filepath, encoding="utf-8") as f:
+    def _generate_examples(self, json_filepath, csv_filepath):
+        # Read the CSV file and store it in memory
+        csv_data = {}
+        with open(csv_filepath, encoding="utf-8") as csv_file:
+            csv_reader = csv.DictReader(csv_file)
+            for row in csv_reader:
+                csv_data[row['question_id']] = row
+
+        # Read the JSON file and yield examples
+        with open(json_filepath, encoding="utf-8") as f:
             squad_data = json.load(f)["data"]
             for article in squad_data:
                 title = article.get("title", "")
@@ -56,10 +71,15 @@ class CustomSQuADFormatDataset(GeneratorBasedBuilder):
                         id_ = qa["id"]
                         question = qa["question"]
                         answers = [{"text": answer["text"], "answer_start": answer["answer_start"]} for answer in qa.get("answers", [])]
-
-                        yield id_, {
+
+                        # Combine data from the CSV if present
+                        additional_csv_data = csv_data.get(id_, {})
+                        # Include additional fields from the CSV in the example if necessary
+                        example = {
                             "title": title,
                             "context": context,
                             "question": question,
                             "answers": answers,
+                            # Add additional fields from the CSV here
                         }
+                        yield id_, example
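
Note that the committed _generate_examples looks up additional_csv_data from the metadata CSV but never copies any of its columns into the yielded example, and _info only leaves a comment where the matching datasets.Features entries would go. The sketch below shows one way that merge could be completed; it is not part of the commit. The import datasets style, the flat feature layout, and the "source" metadata column are assumptions (only the "question_id" column is visible in the diff).

    # Sketch only, not the committed code. Assumes the metadata CSV has a
    # "question_id" column (used in the diff) and a hypothetical "source" column.
    import csv
    import json

    import datasets


    def csv_features():
        """Features for the merged example; the extra "source" field is assumed."""
        return datasets.Features(
            {
                "title": datasets.Value("string"),
                "context": datasets.Value("string"),
                "question": datasets.Value("string"),
                "answers": datasets.Sequence(
                    {
                        "text": datasets.Value("string"),
                        "answer_start": datasets.Value("int32"),
                    }
                ),
                "source": datasets.Value("string"),  # hypothetical CSV column
            }
        )


    def generate_examples(json_filepath, csv_filepath):
        # Index the metadata CSV by question id, as the committed code does.
        with open(csv_filepath, encoding="utf-8") as csv_file:
            csv_data = {row["question_id"]: row for row in csv.DictReader(csv_file)}

        with open(json_filepath, encoding="utf-8") as f:
            squad_data = json.load(f)["data"]

        for article in squad_data:
            title = article.get("title", "")
            for paragraph in article["paragraphs"]:
                context = paragraph["context"]
                for qa in paragraph["qas"]:
                    id_ = qa["id"]
                    meta = csv_data.get(id_, {})  # CSV row for this question, if any
                    yield id_, {
                        "title": title,
                        "context": context,
                        "question": qa["question"],
                        "answers": [
                            {"text": a["text"], "answer_start": a["answer_start"]}
                            for a in qa.get("answers", [])
                        ],
                        "source": meta.get("source", ""),  # merged CSV metadata field
                    }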
arabicaqa.py CHANGED
@@ -1,3 +1,5 @@
+import csv
+import json
 from datasets import load_dataset_builder, DatasetInfo, DownloadConfig, GeneratorBasedBuilder, datasets
 
 class CustomSQuADFormatDataset(GeneratorBasedBuilder):
@@ -5,9 +7,12 @@ class CustomSQuADFormatDataset(GeneratorBasedBuilder):
 
     VERSION = datasets.Version("1.0.0")
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="ArabicaQA", version=VERSION, description="Custom dataset similar to SQuAD format.")
+        datasets.BuilderConfig(
+            name="ArabicaQA",
+            version=VERSION,
+            description="Custom dataset similar to SQuAD format, including CSV data."
+        )
     ]
-
 
     def _info(self):
         return DatasetInfo(
@@ -24,6 +29,7 @@ class CustomSQuADFormatDataset(GeneratorBasedBuilder):
                             "answer_start": datasets.Value("int32"),
                         }
                     ),
+                    # Additional fields from the CSV can be added here if needed
                 }
             ),
             supervised_keys=None,
@@ -35,19 +41,27 @@ class CustomSQuADFormatDataset(GeneratorBasedBuilder):
         urls_to_download = {
             "train": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/train.json",
             "dev": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/val.json",
-            "test": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/test.json"
+            "test": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/test.json",
+            "csv": "https://huggingface.co/datasets/abdoelsayed/ArabicaQA/raw/main/MRC/all_data_meta.csv"
         }
         downloaded_files = dl_manager.download(urls_to_download)
 
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["test"]}),
-
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"json_filepath": downloaded_files["train"], "csv_filepath": downloaded_files["csv"]}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"json_filepath": downloaded_files["dev"], "csv_filepath": downloaded_files["csv"]}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"json_filepath": downloaded_files["test"], "csv_filepath": downloaded_files["csv"]}),
        ]
 
-    def _generate_examples(self, filepath):
-        with open(filepath, encoding="utf-8") as f:
+    def _generate_examples(self, json_filepath, csv_filepath):
+        # Read the CSV file and store it in memory
+        csv_data = {}
+        with open(csv_filepath, encoding="utf-8") as csv_file:
+            csv_reader = csv.DictReader(csv_file)
+            for row in csv_reader:
+                csv_data[row['question_id']] = row
+
+        # Read the JSON file and yield examples
+        with open(json_filepath, encoding="utf-8") as f:
             squad_data = json.load(f)["data"]
             for article in squad_data:
                 title = article.get("title", "")
@@ -57,10 +71,15 @@ class CustomSQuADFormatDataset(GeneratorBasedBuilder):
                         id_ = qa["id"]
                         question = qa["question"]
                         answers = [{"text": answer["text"], "answer_start": answer["answer_start"]} for answer in qa.get("answers", [])]
-
-                        yield id_, {
+
+                        # Combine data from the CSV if present
+                        additional_csv_data = csv_data.get(id_, {})
+                        # Include additional fields from the CSV in the example if necessary
+                        example = {
                             "title": title,
                             "context": context,
                             "question": question,
                             "answers": answers,
+                            # Add additional fields from the CSV here
                        }
+                        yield id_, example
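
Both files carry the same builder, so after this commit the script should expose train, validation, and test splits plus the CSV metadata hook. A minimal usage sketch follows; it assumes this script is what resolves for the abdoelsayed/ArabicaQA repository id and that the installed datasets version accepts trust_remote_code for script-based datasets.

    from datasets import load_dataset

    # Assumes the loading script above is the one picked up for this repository id.
    ds = load_dataset("abdoelsayed/ArabicaQA", "ArabicaQA", trust_remote_code=True)

    print(ds)  # expected splits after this commit: train / validation / test
    sample = ds["train"][0]
    print(sample["question"], sample["answers"])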