# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.  
# SPDX-License-Identifier: CC-BY-NC-4.0

import os
import json
import argparse
from tqdm import tqdm
from DecAF.Datasets.QA.utils import parse_answer


# Command-line configuration: where the retrieval results live and which
# task variants (semantic parsing / question answering) to emit.
parser = argparse.ArgumentParser(
    description='Process the retrieved data to fit the format of FiD model')
parser.add_argument(
    "--retrieval_data_path",
    type=str,
    default='/home/xionggm/codes/decode-answer-logical-form/save_dir/Retrieval/pyserini/search_results/QA_CWQ_Freebase_BM25',
)
parser.add_argument(
    "--mode",
    type=str,
    default='SPQA',
    help="SPQA or QA or SP",
)
args = parser.parse_args()

# --- Build the filter set of question ids (valid_ids) and a lookup from
# QuestionId to its full task entry (lf_map, later used to attach the
# processed logical form). ---
from glob import glob

valid_ids = set()
for dataset in ["WebQSP", "CWQ"]:
    # Annotated files: each file holds a single JSON object with an "id".
    # BUGFIX: glob's `**` only recurses when recursive=True; without it the
    # pattern matched exactly one directory level (same as `*`).
    anno_pattern = f"/home/xionggm/codes/LLM_KGQA/save-anno-clean/{dataset.lower()}/**/*.json"
    for path in glob(anno_pattern, recursive=True):
        # Context manager avoids leaking file handles (original used bare open()).
        with open(path) as f:
            valid_ids.add(json.load(f)["id"].replace(".P0", ""))

    # Test files: each file holds a JSON list of entries.
    for path in glob(f"/home/xionggm/codes/LLM_KGQA/data/{dataset.lower()}/test/*.json"):
        with open(path) as f:
            for d in json.load(f):
                valid_ids.add(d["id"].replace(".P0", ""))

print("len(valid_ids)", len(valid_ids))

# Map QuestionId -> task entry for every QA split we may process.
paths = [
    "data_dir/tasks/QA/WebQSP/train.json",
    "data_dir/tasks/QA/WebQSP/test.json",
    "data_dir/tasks/QA/CWQ/train.json",
    "data_dir/tasks/QA/CWQ/dev.json",
    "data_dir/tasks/QA/CWQ/test.json",
]
lf_map = {}
for p in paths:
    with open("/home/xionggm/codes/decode-answer-logical-form/" + p) as f:
        for d in json.load(f):
            lf_map[d["QuestionId"]] = d
# ---

for split in ["dev", "train", "test"]:
    # Convert one retrieval split into FiD-format examples and write it
    # next to the input as <split>_fid_<mode>.json.
    file_path = os.path.join(args.retrieval_data_path, f"{split}.json")
    if not os.path.exists(file_path):
        print(f"{file_path} not exists")
        continue
    with open(file_path, "r") as rf:
        data = json.load(rf)

    new_data_qa = []
    new_data_sp = []
    for data_i in tqdm(data):
        # Keep only questions present in the annotated/test id set.
        if data_i["QuestionId"] not in valid_ids:
            continue
        # NOTE(review): raises KeyError if a valid id is absent from lf_map —
        # confirm every valid id appears in the task files.
        lf_entry = lf_map[data_i["QuestionId"]]
        # BUGFIX: the presence check must be done against the task entry
        # BEFORE the assignment below; the original tested
        # `"LF_processed" in data_i` after unconditionally assigning the key,
        # which was always true and made the `elif` fallback unreachable.
        has_lf = "LF_processed" in lf_entry
        data_i["LF_processed"] = lf_entry.get("LF_processed", [])
        if args.mode != "QA":
            if has_lf:
                new_data_sp.append({
                    "id": str(data_i["QuestionId"]) + ":SP",
                    "question": "Semantic Parsing: " + data_i["Question"],
                    "answers": data_i["LF_processed"],
                    "ctxs": data_i["ctxs"],
                })
            elif split != "train":
                # Eval splits keep questions without a logical form, using a
                # placeholder answer; train questions without LF are dropped.
                new_data_sp.append({
                    "id": str(data_i["QuestionId"]) + ":SP",
                    "question": "Semantic Parsing: " + data_i["Question"],
                    "answers": ["none"],
                    "ctxs": data_i["ctxs"],
                })
        if args.mode != "SP":
            new_data_qa.append({
                "id": str(data_i["QuestionId"]) + ":QA",
                "question": "Question Answering: " + data_i["Question"],
                "answers": parse_answer(data_i["Answers"]),
                "ctxs": data_i["ctxs"],
            })
    new_data = new_data_qa + new_data_sp

    print("len(new_data)", len(new_data))
    print("len(new_data_sp)", len(new_data_sp))

    output_file = file_path.replace(".json", f"_fid_{args.mode}.json")
    print(f"Saving to {output_file}")
    with open(output_file, "w") as wf:
        json.dump(new_data, wf, indent=2)