import json

import pandas as pd
from Bio import Entrez
from retry import retry
from tqdm import tqdm

# provide your NCBI credentials (email and API key) here
Entrez.email = "***"
Entrez.api_key = "***"


# change output file names here if necessary
RAW_EVALUATION_DATASET = "training11b.json"
PATH_TO_PASSAGE_DATASET = "./passages.parquet"
PATH_TO_EVALUATION_DATASET = "./eval.parquet"

# only use questions that have at most MAX_PASSAGES relevant passages
# to control the size of the dataset; set to None to keep all questions
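# e.g. MAX_PASSAGES = 10 would keep only questions with at most 10 relevant passages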
MAX_PASSAGES = None


# retry indefinitely on transient request failures (e.g. rate limiting by NCBI)
@retry()
def get_abstract(passage_id):
    with Entrez.efetch(
        db="pubmed", id=passage_id, rettype="abstract", retmode="text"
    ) as response:
        # the efetch response is plain text with blank-line-separated blocks
        # (title, authors, metadata, ...); heuristically, the longest block
        # is the abstract
        r = response.read()
        r = r.split("\n\n")
        abstract = max(r, key=len)
        return abstract
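
# minimal usage sketch (hypothetical PMID; assumes valid credentials above):
#   abstract = get_abstract("12345678")  # -> the record's abstract as a string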


if __name__ == "__main__":
    # load the training data containing the questions, answers and the ids of relevant passages
    # but lacks the actual passages
    with open(RAW_EVALUATION_DATASET) as f:
        eval_data = json.load(f)["questions"]
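
    # expected structure of the raw file (inferred from the fields used below):
    #   {"questions": [{"body": "<question text>",
    #                   "documents": ["http://www.ncbi.nlm.nih.gov/pubmed/<pmid>", ...],
    #                   "ideal_answer": ["<answer text>", ...],
    #                   ...}, ...]}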

    eval_df = pd.DataFrame(eval_data, columns=["body", "documents", "ideal_answer"])
    eval_df = eval_df.rename(
        columns={
            "body": "question",
            "documents": "relevant_passages",
            "ideal_answer": "answer",
        }
    )
    # keep only the first of the ideal answers
    eval_df.answer = eval_df.answer.apply(lambda x: x[0])
    # get abstract id from url
    eval_df.relevant_passages = eval_df.relevant_passages.apply(
        lambda x: [url.split("/")[-1] for url in x]
    )
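    # e.g. "http://www.ncbi.nlm.nih.gov/pubmed/12345678" -> "12345678" (hypothetical id)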
    if MAX_PASSAGES:
        # drop questions with more than MAX_PASSAGES relevant passages
        eval_df["passage_count"] = eval_df.relevant_passages.apply(len)
        eval_df = eval_df[eval_df.passage_count <= MAX_PASSAGES]
        eval_df = eval_df.drop(columns=["passage_count"])

    # remove duplicate passage ids
    eval_df.relevant_passages = eval_df.relevant_passages.apply(
        lambda x: list(set(x))
    )

    # collect the union of all relevant passage ids across questions,
    # e.g. [["1", "2"], ["2", "3"]] -> ["1", "2", "3"]
    passage_ids = list(set().union(*eval_df.relevant_passages))
    passages = pd.DataFrame(index=passage_ids)

    for i, passage_id in enumerate(tqdm(passages.index)):
        passages.loc[passage_id, "passage"] = get_abstract(passage_id)

        # intermediate save so progress is not lost if a request fails
        if i % 4000 == 0:
            passages.to_parquet(PATH_TO_PASSAGE_DATASET)

    # filter out the passages whose pmids (pubmed ids) were not available;
    # for those, efetch returns only the bare list marker "1. "
    unavailable_passages = passages[passages["passage"] == "1. "]
    passages = passages[passages["passage"] != "1. "]
    passages.to_parquet(PATH_TO_PASSAGE_DATASET)

    # also remove passages whose abstracts could not be retrieved
    # from PubMed from the evaluation dataset
    unavailable_ids = unavailable_passages.index.tolist()
    eval_df["relevant_passages"] = eval_df["relevant_passages"].apply(
        lambda x: [i for i in x if i not in unavailable_ids]
    )
    eval_df.to_parquet(PATH_TO_EVALUATION_DATASET)
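
# afterwards, the datasets can be loaded for evaluation, e.g.:
#   passages = pd.read_parquet(PATH_TO_PASSAGE_DATASET)
#   eval_df = pd.read_parquet(PATH_TO_EVALUATION_DATASET)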