# rag-mini-bioasq / generate.py
import json
import pandas as pd
from Bio import Entrez
from retry import retry
from tqdm import tqdm
import dask.dataframe as dd
# provide your NIH (NCBI Entrez) credentials, read from a .json file
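# expected credentials.json layout (field names come from the lookups below;
# the values here are placeholders):
# {"email": "you@example.org", "api_key": "<your NCBI API key>"}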
with open("credentials.json") as f:
    credentials = json.load(f)
Entrez.email = credentials["email"]
Entrez.api_key = credentials["api_key"]
# change output file names here if necessary
RAW_EVALUATION_DATASET = "./raw_data/training11b.json"
PATH_TO_PASSAGE_DATASET = "./data/passages.parquet"
PATH_TO_EVALUATION_DATASET = "./data/test.parquet"
# only use questions that have at most MAX_PASSAGES passages to control the size of the dataset
# set to None to use all questions
MAX_PASSAGES = None
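# e.g. MAX_PASSAGES = 20 would keep only questions with at most 20 relevant passages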
@retry()
def get_abstract(passage_id):
    # retry() without arguments retries indefinitely on any exception,
    # which papers over transient network errors and NCBI rate limiting
    with Entrez.efetch(
        db="pubmed", id=passage_id, rettype="abstract", retmode="text"
    ) as response:
        # get only the abstract - no metadata: the plain-text response is a set of
        # blank-line-separated blocks, and the abstract is taken to be the longest one
        r = response.read()
        r = r.split("\n\n")
        abstract = max(r, key=len)
        return abstract
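# Illustrative shape of the efetch plain-text response the heuristic above assumes
# (not an exact transcript):
#
#   1. Journal. Year;Vol(Issue):pages.
#
#   Article title.
#
#   Author A, Author B.
#
#   <abstract body, usually the longest block>
#
#   PMID: 12345678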
if __name__ == "__main__":
    # load the training data, which contains the questions, answers, and the IDs
    # of the relevant passages - but not the passages themselves
    with open(RAW_EVALUATION_DATASET) as f:
        eval_data = json.load(f)["questions"]
    eval_df = pd.DataFrame(eval_data, columns=["body", "documents", "ideal_answer"])
    eval_df = eval_df.rename(
        columns={
            "body": "question",
            "documents": "relevant_passage_ids",
            "ideal_answer": "answer",
        }
    )
    # "ideal_answer" is a list of reference answers; keep only the first one
    eval_df.answer = eval_df.answer.apply(lambda x: x[0])
    # extract the numeric PubMed ID from each document URL
    eval_df.relevant_passage_ids = eval_df.relevant_passage_ids.apply(
        lambda x: [int(url.split("/")[-1]) for url in x]
    )
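    # e.g. "http://www.ncbi.nlm.nih.gov/pubmed/23090579" -> 23090579
    # (URL form assumed from the BioASQ "documents" field; only the trailing ID is used)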
    if MAX_PASSAGES:
        eval_df["passage_count"] = eval_df.relevant_passage_ids.apply(len)
        eval_df = eval_df[eval_df.passage_count <= MAX_PASSAGES]
        eval_df = eval_df.drop(columns=["passage_count"])
    # remove duplicate passage ids
    eval_df.relevant_passage_ids = eval_df.relevant_passage_ids.apply(
        lambda x: list(set(x))
    )
    # get all passage ids that are relevant
    passage_ids = set().union(*eval_df.relevant_passage_ids)
    passage_ids = list(passage_ids)
    passages = pd.DataFrame(index=passage_ids)
    # fetch each abstract from PubMed - the slow, network-bound part of the script
    for i, passage_id in enumerate(tqdm(passages.index)):
        passages.loc[passage_id, "passage"] = get_abstract(passage_id)
        # intermediate save every 1000 abstracts so a crash does not lose all progress
        if i % 1000 == 0:
            passages.index.name = "id"
            dd.from_pandas(passages, npartitions=1).to_parquet(PATH_TO_PASSAGE_DATASET)
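    # note: each checkpoint overwrites PATH_TO_PASSAGE_DATASET, so only the most
    # recent partial result is kept on disk until the final save below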
    # filter out the passages whose PMIDs (PubMed IDs) were not available;
    # for those, get_abstract returns just the listing prefix "1. "
    unavailable_passages = passages[passages["passage"] == "1. "]
    passages = passages[passages["passage"] != "1. "]
    passages.index.name = "id"
    dd.from_pandas(passages, npartitions=1).to_parquet(PATH_TO_PASSAGE_DATASET)
    # remove passages from the evaluation dataset whose abstract could not be retrieved from PubMed
    unavailable_ids = unavailable_passages.index.tolist()
    eval_df["relevant_passage_ids"] = eval_df["relevant_passage_ids"].apply(
        lambda x: [i for i in x if i not in unavailable_ids]
    )
    eval_df.index.name = "id"
    eval_df = eval_df[["question", "answer", "relevant_passage_ids"]]
    dd.from_pandas(eval_df, npartitions=1).to_parquet(PATH_TO_EVALUATION_DATASET)
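# To load the generated splits afterwards (a sketch; pandas with pyarrow can read
# the directory that dask writes):
# passages = pd.read_parquet(PATH_TO_PASSAGE_DATASET)
# test_set = pd.read_parquet(PATH_TO_EVALUATION_DATASET)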