tillwenke committed
Commit 463a104
1 Parent(s): 90328a9

before creating dataset

.gitignore CHANGED
@@ -1 +1,2 @@
- /env
+ /env
+ credentials.json
.ipynb ADDED
@@ -0,0 +1,277 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "a = pd.read_parquet(\"data/test.parquet\")\n",
+ "b = pd.read_parquet(\"data/passages.parquet\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<div>\n",
+ "<style scoped>\n",
+ " .dataframe tbody tr th:only-of-type {\n",
+ " vertical-align: middle;\n",
+ " }\n",
+ "\n",
+ " .dataframe tbody tr th {\n",
+ " vertical-align: top;\n",
+ " }\n",
+ "\n",
+ " .dataframe thead th {\n",
+ " text-align: right;\n",
+ " }\n",
+ "</style>\n",
+ "<table border=\"1\" class=\"dataframe\">\n",
+ " <thead>\n",
+ " <tr style=\"text-align: right;\">\n",
+ " <th></th>\n",
+ " <th>question</th>\n",
+ " <th>answer</th>\n",
+ " <th>relevant_passage_ids</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>id</th>\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th>0</th>\n",
+ " <td>Is Hirschsprung disease a mendelian or a multi...</td>\n",
+ " <td>Coding sequence mutations in RET, GDNF, EDNRB,...</td>\n",
+ " <td>[20598273, 6650562, 15829955, 15617541, 230011...</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>1</th>\n",
+ " <td>List signaling molecules (ligands) that intera...</td>\n",
+ " <td>The 7 known EGFR ligands are: epidermal growt...</td>\n",
+ " <td>[23821377, 24323361, 23382875, 22247333, 23787...</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>2</th>\n",
+ " <td>Is the protein Papilin secreted?</td>\n",
+ " <td>Yes, papilin is a secreted protein</td>\n",
+ " <td>[21784067, 19297413, 15094122, 7515725, 332004...</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>3</th>\n",
+ " <td>Are long non coding RNAs spliced?</td>\n",
+ " <td>Long non coding RNAs appear to be spliced thro...</td>\n",
+ " <td>[22955974, 21622663, 22707570, 22955988, 24285...</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>4</th>\n",
+ " <td>Is RANKL secreted from the cells?</td>\n",
+ " <td>Receptor activator of nuclear factor κB ligand...</td>\n",
+ " <td>[22867712, 23827649, 21618594, 23835909, 24265...</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>...</th>\n",
+ " <td>...</td>\n",
+ " <td>...</td>\n",
+ " <td>...</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>4714</th>\n",
+ " <td>Is PPROM a condition that occurs in males or f...</td>\n",
+ " <td>Preterm premature rupture of fetal membranes (...</td>\n",
+ " <td>[23599878, 23573382, 24304137, 18301713, 23179...</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>4715</th>\n",
+ " <td>What is EpiMethylTag?</td>\n",
+ " <td>EpiMethylTag is a fast, low-input, low sequenc...</td>\n",
+ " <td>[31752933]</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>4716</th>\n",
+ " <td>What is the target of Sutimlimab?</td>\n",
+ " <td>Sutimlimab is a novel humanized monoclonal ant...</td>\n",
+ " <td>[30635392, 31229501, 33826820, 32176765, 31114...</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>4717</th>\n",
+ " <td>Can parasite infections by Schistosoma japonic...</td>\n",
+ " <td>A peptide named as SJMHE1 from Schistosoma jap...</td>\n",
+ " <td>[26840774, 34703270, 28614408, 31496071, 18654...</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>4718</th>\n",
+ " <td>Describe Multilocus Inherited Neoplasia Allele...</td>\n",
+ " <td>Genetic testing of hereditary cancer using com...</td>\n",
+ " <td>[30580288]</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "<p>4719 rows × 3 columns</p>\n",
+ "</div>"
+ ],
+ "text/plain": [
+ " question \\\n",
+ "id \n",
+ "0 Is Hirschsprung disease a mendelian or a multi... \n",
+ "1 List signaling molecules (ligands) that intera... \n",
+ "2 Is the protein Papilin secreted? \n",
+ "3 Are long non coding RNAs spliced? \n",
+ "4 Is RANKL secreted from the cells? \n",
+ "... ... \n",
+ "4714 Is PPROM a condition that occurs in males or f... \n",
+ "4715 What is EpiMethylTag? \n",
+ "4716 What is the target of Sutimlimab? \n",
+ "4717 Can parasite infections by Schistosoma japonic... \n",
+ "4718 Describe Multilocus Inherited Neoplasia Allele... \n",
+ "\n",
+ " answer \\\n",
+ "id \n",
+ "0 Coding sequence mutations in RET, GDNF, EDNRB,... \n",
+ "1 The 7 known EGFR ligands are: epidermal growt... \n",
+ "2 Yes, papilin is a secreted protein \n",
+ "3 Long non coding RNAs appear to be spliced thro... \n",
+ "4 Receptor activator of nuclear factor κB ligand... \n",
+ "... ... \n",
+ "4714 Preterm premature rupture of fetal membranes (... \n",
+ "4715 EpiMethylTag is a fast, low-input, low sequenc... \n",
+ "4716 Sutimlimab is a novel humanized monoclonal ant... \n",
+ "4717 A peptide named as SJMHE1 from Schistosoma jap... \n",
+ "4718 Genetic testing of hereditary cancer using com... \n",
+ "\n",
+ " relevant_passage_ids \n",
+ "id \n",
+ "0 [20598273, 6650562, 15829955, 15617541, 230011... \n",
+ "1 [23821377, 24323361, 23382875, 22247333, 23787... \n",
+ "2 [21784067, 19297413, 15094122, 7515725, 332004... \n",
+ "3 [22955974, 21622663, 22707570, 22955988, 24285... \n",
+ "4 [22867712, 23827649, 21618594, 23835909, 24265... \n",
+ "... ... \n",
+ "4714 [23599878, 23573382, 24304137, 18301713, 23179... \n",
+ "4715 [31752933] \n",
+ "4716 [30635392, 31229501, 33826820, 32176765, 31114... \n",
+ "4717 [26840774, 34703270, 28614408, 31496071, 18654... \n",
+ "4718 [30580288] \n",
+ "\n",
+ "[4719 rows x 3 columns]"
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "a"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<div>\n",
+ "<style scoped>\n",
+ " .dataframe tbody tr th:only-of-type {\n",
+ " vertical-align: middle;\n",
+ " }\n",
+ "\n",
+ " .dataframe tbody tr th {\n",
+ " vertical-align: top;\n",
+ " }\n",
+ "\n",
+ " .dataframe thead th {\n",
+ " text-align: right;\n",
+ " }\n",
+ "</style>\n",
+ "<table border=\"1\" class=\"dataframe\">\n",
+ " <thead>\n",
+ " <tr style=\"text-align: right;\">\n",
+ " <th></th>\n",
+ " <th>passage</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>id</th>\n",
+ " <th></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th>21495810</th>\n",
+ " <td>OBJECT: Factors determining choice of an acade...</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>26869762</th>\n",
+ " <td>Castleman disease (CD) is a rare, heterogeneou...</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>28049410</th>\n",
+ " <td>BACKGROUND: Data extraction and integration me...</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>24510469</th>\n",
+ " <td>Flecainide is recommended as a first-line anti...</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>8650761</th>\n",
+ " <td>Primary intestinal lymphangiectasia (PIL), fir...</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</div>"
+ ],
+ "text/plain": [
+ " passage\n",
+ "id \n",
+ "21495810 OBJECT: Factors determining choice of an acade...\n",
+ "26869762 Castleman disease (CD) is a rare, heterogeneou...\n",
+ "28049410 BACKGROUND: Data extraction and integration me...\n",
+ "24510469 Flecainide is recommended as a first-line anti...\n",
+ "8650761 Primary intestinal lymphangiectasia (PIL), fir..."
+ ]
+ },
+ "execution_count": 11,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "b"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "env",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.12"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
README.md CHANGED
@@ -11,5 +11,14 @@ tags:
  - information-retrieval
  - question-answering
  - biomedical
+ configs:
+ - config_name: text-corpus
+   data_files:
+   - split: passages
+     path: "data/passages.parquet/*"
+ - config_name: question-answer
+   data_files:
+   - split: test
+     path: "data/test.parquet/*"
  ---
- Derives from http://participants-area.bioasq.org/Tasks/11b/trainingDataset/ we generated our own subset using `generate.py`.
+ Derives from http://participants-area.bioasq.org/Tasks/11b/trainingDataset/ we generated our own subset using `generate.py`.
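The added configs block wires the two parquet directories into named dataset configurations with one split each. A minimal sketch of how they could be loaded with the `datasets` library, assuming the dataset is published on the Hub; the `<user>/<dataset>` repo id below is a placeholder, not the actual path:

from datasets import load_dataset

# "<user>/<dataset>" is a hypothetical repo id -- substitute the real Hub path of this dataset.
passages = load_dataset("<user>/<dataset>", "text-corpus", split="passages")
qa = load_dataset("<user>/<dataset>", "question-answer", split="test")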
data/passages.parquet/part.0.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c288905f142dde9c3c21207333380a81d3f34603584851be02ccf7e543041934
+ size 12581
data/test.parquet/part.0.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12679e03615d16b423b5554f8b2a6eb334f4cad89e62d202da5cc43cb9aeafb0
+ size 1290026
bioasq_ir_pubmed_corpus_subset.py → generate.py RENAMED
@@ -4,19 +4,23 @@ import pandas as pd
  from Bio import Entrez
  from retry import retry
  from tqdm import tqdm
+ import dask.dataframe as dd
 
  # provided your NIH credentials
- Entrez.email = "***"
- Entrez.api_key = "***"
+ # read from .json file
+ with open("credentials.json") as f:
+     credentials = json.load(f)
+ Entrez.email = credentials["email"]
+ Entrez.api_key = credentials["api_key"]
 
 
  # change output file names here if necessary
- RAW_EVALUATION_DATASET = "training11b.json"
- PATH_TO_PASSAGE_DATASET = "./passages.parquet"
- PATH_TO_EVALUATION_DATASET = "./eval.parquet"
+ RAW_EVALUATION_DATASET = "./raw_data/training11b.json"
+ PATH_TO_PASSAGE_DATASET = "./data/passages.parquet"
+ PATH_TO_EVALUATION_DATASET = "./data/test.parquet"
 
  # only use questions that have at most MAX_PASSAGES passages to control the size of the dataset
- # set to None to use all passages
+ # set to None to use all questions
  MAX_PASSAGES = None
 
 
@@ -42,43 +46,47 @@ if __name__ == "__main__":
      eval_df = eval_df.rename(
          columns={
              "body": "question",
-             "documents": "relevant_passages",
+             "documents": "relevant_passage_ids",
              "ideal_answer": "answer",
          }
      )
      eval_df.answer = eval_df.answer.apply(lambda x: x[0])
      # get abstract id from url
-     eval_df.relevant_passages = eval_df.relevant_passages.apply(
-         lambda x: [url.split("/")[-1] for url in x]
+     eval_df.relevant_passage_ids = eval_df.relevant_passage_ids.apply(
+         lambda x: [int(url.split("/")[-1]) for url in x]
      )
      if MAX_PASSAGES:
-         eval_df["passage_count"] = eval_df.relevant_passages.apply(lambda x: len(x))
+         eval_df["passage_count"] = eval_df.relevant_passage_ids.apply(lambda x: len(x))
          eval_df = eval_df.drop(columns=["passage_count"])
 
      # remove duplicate passage ids
-     eval_df.relevant_passages = eval_df.relevant_passages.apply(lambda x: set(x))
-     eval_df.relevant_passages = eval_df.relevant_passages.apply(lambda x: list(x))
+     eval_df.relevant_passage_ids = eval_df.relevant_passage_ids.apply(lambda x: set(x))
+     eval_df.relevant_passage_ids = eval_df.relevant_passage_ids.apply(lambda x: list(x))
 
      # get all passage ids that are relevant
-     passage_ids = set().union(*eval_df.relevant_passages)
+     passage_ids = set().union(*eval_df.relevant_passage_ids)
      passage_ids = list(passage_ids)
      passages = pd.DataFrame(index=passage_ids)
 
      for i, passage_id in enumerate(tqdm(passages.index)):
          passages.loc[passage_id, "passage"] = get_abstract(passage_id)
 
-         # intermidiate save
-         if i % 4000 == 0:
-             passages.to_parquet(PATH_TO_PASSAGE_DATASET)
+         # intermediate save
+         if i % 1000 == 0:
+             dd.from_pandas(passages, npartitions=1).to_parquet(PATH_TO_PASSAGE_DATASET)
+
 
      # filter out the passages whos pmids (pubmed ids) where not available
      unavailable_passages = passages[passages["passage"] == "1. "]
      passages = passages[passages["passage"] != "1. "]
-     passages.to_parquet(PATH_TO_PASSAGE_DATASET)
+     passages.index.name = "id"
+     dd.from_pandas(passages, npartitions=1).to_parquet(PATH_TO_PASSAGE_DATASET)
 
      # remove passages from evaluation dataset whose abstract could not be retrieved from pubmed website
      unavailable_ids = unavailable_passages.index.tolist()
-     eval_df["relevant_passages"] = eval_df["relevant_passages"].apply(
+     eval_df["relevant_passage_ids"] = eval_df["relevant_passage_ids"].apply(
          lambda x: [i for i in x if i not in unavailable_ids]
      )
-     eval_df.to_parquet(PATH_TO_EVALUATION_DATASET)
+     eval_df.index.name = "id"
+     eval_df = eval_df[["question", "answer", "relevant_passage_ids"]]
+     dd.from_pandas(eval_df, npartitions=1).to_parquet(PATH_TO_EVALUATION_DATASET)
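The fetch loop above calls a `get_abstract` helper whose body lies outside these hunks, so the diff does not show it. A hedged sketch of what such a helper could look like, assuming it fetches the plain-text abstract for one PubMed id via `Entrez.efetch` and retries transient failures with the `retry` decorator already imported at the top of the script; the actual implementation in `generate.py` may differ:

from Bio import Entrez
from retry import retry

# Sketch only: the real get_abstract in generate.py is not shown in this commit.
@retry(tries=3, delay=1)
def get_abstract(pubmed_id):
    # Fetch the abstract for a single PubMed id as plain text.
    handle = Entrez.efetch(db="pubmed", id=str(pubmed_id), rettype="abstract", retmode="text")
    text = handle.read()
    handle.close()
    return text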
requirements.txt CHANGED
@@ -1,11 +1,50 @@
+ asttokens==2.4.1
+ backcall==0.2.0
  biopython==1.81
+ click==8.1.7
+ cloudpickle==3.0.0
+ comm==0.1.4
+ dask==2023.10.1
+ debugpy==1.8.0
  decorator==5.1.1
+ exceptiongroup==1.1.3
+ executing==2.0.0
+ fsspec==2023.10.0
+ importlib-metadata==6.8.0
+ ipykernel==6.26.0
+ ipython==8.16.1
+ jedi==0.19.1
+ jupyter_client==8.5.0
+ jupyter_core==5.4.0
+ locket==1.0.0
+ matplotlib-inline==0.1.6
+ nest-asyncio==1.5.8
  numpy==1.26.1
+ packaging==23.2
  pandas==2.1.2
+ parso==0.8.3
+ partd==1.4.1
+ pexpect==4.8.0
+ pickleshare==0.7.5
+ platformdirs==3.11.0
+ prompt-toolkit==3.0.39
+ psutil==5.9.6
+ ptyprocess==0.7.0
+ pure-eval==0.2.2
  py==1.11.0
+ pyarrow==13.0.0
+ Pygments==2.16.1
  python-dateutil==2.8.2
  pytz==2023.3.post1
+ PyYAML==6.0.1
+ pyzmq==25.1.1
  retry==0.9.2
  six==1.16.0
+ stack-data==0.6.3
+ toolz==0.12.0
+ tornado==6.3.3
  tqdm==4.66.1
+ traitlets==5.12.0
  tzdata==2023.3
+ wcwidth==0.2.8
+ zipp==3.17.0