# rag-mini-bioasq / bioasq_ir_pubmed_corpus_subset.py
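"""Build a PubMed passage corpus and evaluation set from raw BioASQ data.

Reads the raw BioASQ training file (e.g. training11b.json), fetches the
abstract of every referenced PubMed document via Entrez, and writes two
parquet files: one with the passages and one with the evaluation rows
(question, relevant_passages, answer).

Usage: python bioasq_ir_pubmed_corpus_subset.py
"""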
import json
import pandas as pd
from Bio import Entrez
from retry import retry
from tqdm import tqdm
# provide your NCBI (NIH) credentials here
Entrez.email = "***"
Entrez.api_key = "***"
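# note: without an API key NCBI E-utilities allow ~3 requests/second,
# with a key ~10/second, so the key speeds up the fetch loop below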
# change input/output file names here if necessary
RAW_EVALUATION_DATASET = "training11b.json"
PATH_TO_PASSAGE_DATASET = "./passages.parquet"
PATH_TO_EVALUATION_DATASET = "./eval.parquet"
# only keep questions with at most MAX_PASSAGES relevant passages to control
# the size of the dataset; set to None to keep all questions
MAX_PASSAGES = None
# retry indefinitely on transient Entrez/network errors
@retry()
def get_abstract(passage_id):
with Entrez.efetch(
db="pubmed", id=passage_id, rettype="abstract", retmode="text"
) as response:
        # the plain-text record consists of blank-line-separated blocks
        # (citation, title, authors, abstract, ...); the longest block is
        # almost always the abstract itself, with no metadata
        r = response.read()
        abstract = max(r.split("\n\n"), key=len)
return abstract
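# Illustrative shape of a fetched plain-text record (not a real entry):
#
#   1. J Some Journal. 2015 Jan;1(1):1-10.
#
#   Article title.
#
#   Author A, Author B.
#
#   The abstract itself, typically the longest blank-line-separated block ...
#
# For ids with no retrievable record the response is just the listing
# prefix "1. ", which is what the filtering below relies on.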
if __name__ == "__main__":
    # load the training data, which contains the questions, answers and ids
    # of relevant passages, but not the passage texts themselves
with open(RAW_EVALUATION_DATASET) as f:
eval_data = json.load(f)["questions"]
eval_df = pd.DataFrame(eval_data, columns=["body", "documents", "ideal_answer"])
eval_df = eval_df.rename(
columns={
"body": "question",
"documents": "relevant_passages",
"ideal_answer": "answer",
}
)
    # ideal_answer is a list of reference answers; keep only the first
    eval_df.answer = eval_df.answer.apply(lambda x: x[0])
    # extract the PubMed id (PMID) from each document url
eval_df.relevant_passages = eval_df.relevant_passages.apply(
lambda x: [url.split("/")[-1] for url in x]
)
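    # e.g. "http://www.ncbi.nlm.nih.gov/pubmed/12345678" -> "12345678"
    # (illustrative id; BioASQ lists documents as PubMed urls of this form)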
    if MAX_PASSAGES:
        eval_df["passage_count"] = eval_df.relevant_passages.apply(len)
        eval_df = eval_df[eval_df.passage_count <= MAX_PASSAGES]
        eval_df = eval_df.drop(columns=["passage_count"])
    # remove duplicate passage ids
    eval_df.relevant_passages = eval_df.relevant_passages.apply(lambda x: list(set(x)))
# get all passage ids that are relevant
passage_ids = set().union(*eval_df.relevant_passages)
passage_ids = list(passage_ids)
passages = pd.DataFrame(index=passage_ids)
for i, passage_id in enumerate(tqdm(passages.index)):
passages.loc[passage_id, "passage"] = get_abstract(passage_id)
        # intermediate save so progress survives an interrupted run
if i % 4000 == 0:
passages.to_parquet(PATH_TO_PASSAGE_DATASET)
    # filter out passages whose PMIDs (PubMed ids) were not available;
    # for those, efetch returns only the listing prefix "1. "
unavailable_passages = passages[passages["passage"] == "1. "]
passages = passages[passages["passage"] != "1. "]
passages.to_parquet(PATH_TO_PASSAGE_DATASET)
    # likewise remove ids whose abstracts could not be retrieved from PubMed
    # from the evaluation dataset
unavailable_ids = unavailable_passages.index.tolist()
eval_df["relevant_passages"] = eval_df["relevant_passages"].apply(
lambda x: [i for i in x if i not in unavailable_ids]
)
eval_df.to_parquet(PATH_TO_EVALUATION_DATASET)