MotzWanted committed on
Commit
6622076
1 Parent(s): 6e3fb43

Create medwiki file

Files changed (1)
  1. dataset.py +121 -0
dataset.py ADDED
@@ -0,0 +1,121 @@
+ import os
+ import re
+ from pathlib import Path
+
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+ TXT_PATTERN = r"^.*\.txt$"
+
+ _CITATION = """\
+ @misc{https://doi.org/10.48550/arxiv.2210.06345,
+     doi = {10.48550/ARXIV.2210.06345},
+     url = {https://arxiv.org/abs/2210.06345},
+     author = {Liévin, Valentin and Motzfeldt, Andreas Geert and Jensen, Ida Riis and Winther, Ole},
+     keywords = {Computation and Language (cs.CL), Information Retrieval (cs.IR), Machine Learning (cs.LG), FOS: Computer and information sciences, I.2.7; H.3.3; I.2.1},
+     title = {Variational Open-Domain Question Answering},
+     publisher = {arXiv},
+     year = {2022},
+     copyright = {arXiv.org perpetual, non-exclusive license}
+ }
+ """
+
+ _VERSION = "0.0.1"
+ _HOMEPAGE = "https://github.com/VodLM"
+
+ _DESCRIPTION = """\
+ The MedWiki corpus, distributed under the MIT license, consists of a
+ subset of 4.5% of the English Wikipedia articles, specifically curated
+ for the MedMCQA and USMLE datasets. The collection was created by using
+ the Wikipedia API to search for articles related to each answer option
+ in the MedMCQA and USMLE datasets. The top ten Wikipedia articles for
+ each answer option were selected and included in the final corpus. This
+ subset covers a wide range of medical topics that may be relevant for
+ answering questions in this domain.
+ """
+
+ _URL = "https://f001.backblazeb2.com/file/FindZebraData/fz-openqa/datasets/medwiki_v6.zip"
+
+
+ class MedWikipediaCorpusConfig(datasets.BuilderConfig):
+     """BuilderConfig for the MedQa English Corpus object."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for the Corpus object.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(MedWikipediaCorpusConfig, self).__init__(**kwargs)
+
+ class MedWikipediaCorpusGenerator(datasets.GeneratorBasedBuilder):
+     """MedWikipediaCorpus Dataset. Version 0.0.1"""
+
+     BUILDER_CONFIGS = [
+         MedWikipediaCorpusConfig(
+             name="plain_text",
+             version=datasets.Version(_VERSION, ""),
+             description="Plain text",
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "document.idx": datasets.Value("int32"),
+                     "document.text": datasets.Value("string"),
+                     "document.title": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         downloaded_file = dl_manager.download_and_extract(_URL)
+         if not Path(downloaded_file).is_dir():
+             raise Exception(
+                 f"Could not download the dataset. Content of `downloaded_file`: "
+                 f"{Path(downloaded_file).read_text()}"
+             )
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"output_dir": Path(downloaded_file)},
+             )
+         ]
+
+     def _generate_examples(self, output_dir: Path):
+         logger.info("generating examples from = %s", output_dir)
+         # locate the single corpus directory inside the extracted archive
+         paths = [
+             p
+             for p in Path(output_dir).iterdir()
+             if p.is_dir() and (p.name.startswith("med_x_wiki") or p.name.startswith("wikipedia"))
+         ]
+         assert len(paths) == 1, f"Found {len(paths)} directories in {output_dir}: {paths}"
+         path = paths[0]
+
+         # list the .txt files in the corpus directory
+         data_files = [os.path.join(path, p) for p in os.listdir(path) if re.findall(TXT_PATTERN, p)]
+
+         # iterate over the files and yield one document per file
+         for i, fn in enumerate(data_files):
+             with open(fn, "r") as f:
+                 # the first line is the title; strip the trailing newline
+                 title = f.readline().strip()
+                 text = f.read()
+             yield i, {"document.text": text, "document.idx": i, "document.title": title}
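
For reference, a minimal usage sketch of the new loader (not part of the commit). It assumes dataset.py sits in the working directory, or is published to a Hugging Face dataset repository whose id would replace the path, and that the installed `datasets` release supports loading from a local script:

# Minimal usage sketch (assumption: dataset.py is in the working directory
# and the installed `datasets` release supports script-based loading).
from datasets import load_dataset

corpus = load_dataset("dataset.py", name="plain_text", split="train")

# Records follow the schema declared in _info().
print(corpus[0]["document.title"])
print(corpus[0]["document.text"][:200])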