import argparse
import json
import os
import pickle
from itertools import chain

import numpy as np
from tqdm import tqdm
from transformers import BartTokenizerFast
from glob import glob

# Build the global set of question ids allowed in the filtered "val" split
# (consumed by encode_dataset below).
# Train-side ids come from annotation filenames: ".../{stem}.json" -> stem.
paths = glob("../LLM_KGQA/save-anno-clean/kqapro/*/train-*.json")
valid_train_ids = [p.split("/")[-1].split(".")[0] for p in paths]
print(f"len of valid train ids: {len(valid_train_ids)}")

# Test-side ids come from the "id" field of every record in the test JSON files.
valid_test_ids = []
paths = glob("../LLM_KGQA/data/kqapro/test/*.json")
for p in paths:
    # Use a context manager so the file handle is closed promptly
    # (the original `json.load(open(p))` leaked one handle per file).
    with open(p) as f:
        items = json.load(f)
    for d in items:
        valid_test_ids.append(d["id"])
print(f"len of valid test ids: {len(valid_test_ids)}")

valid_ids = set(valid_train_ids + valid_test_ids)
print(f"len of valid ids: {len(valid_ids)}")

"""
对 padding 部分做了修改

# decode 好像有问题
# "".join([tokenizer.convert_ids_to_tokens(i) for i in target_ids["input_ids"][0] if i > 2]).replace("Ġ"," ")
"""

def encode_dataset(dataset, vocab, tokenizer: BartTokenizerFast, split):
    """Tokenize a KQA Pro split into numpy arrays.

    Args:
        dataset: list of dicts with keys "id", "question", "choices", and
            (for non-test splits) "sparql" and "answer".
        vocab: dict holding "answer_token_to_idx" mapping answer token -> index.
        tokenizer: a BART fast tokenizer used for both questions and SPARQLs.
        split: one of "train" / "val" / "test". For "val" the dataset is first
            filtered to the module-level `valid_ids` set; for "test" no SPARQL
            targets or answers are produced.

    Returns:
        (ids, source_ids, source_mask, target_ids, choices, answers) where the
        numpy arrays use dtype int32 and target_ids/answers are empty for test.
    """
    # Important: the "val" split is restricted to the globally collected
    # `valid_ids` (see module top level).
    if split == "val":
        dataset = [d for d in dataset if d["id"] in valid_ids]
    print("valid dataset size: {}".format(len(dataset)))

    test = split == "test"

    # First pass: gather raw strings only to determine the padded max lengths.
    questions = []
    sparqls = []
    for item in tqdm(dataset):
        questions.append(item["question"])
        if not test:
            sparqls.append(item["sparql"])

    _questions = tokenizer(questions, padding=True)
    max_question_length = len(_questions["input_ids"][0])
    if not test:
        # BUG FIX: previously `sparqls` was tokenized unconditionally; for the
        # test split the list is empty and indexing [0] raised IndexError.
        _sparqls = tokenizer(sparqls, padding=True)
        max_sparql_length = len(_sparqls["input_ids"][0])

    # Second pass: the actual encoding (the pass above was only for lengths).
    questions = []
    sparqls = []
    choices = []
    answers = []
    ids = []
    for item in tqdm(dataset):
        ids.append(item["id"])
        questions.append(item["question"])
        choices.append([vocab["answer_token_to_idx"][w] for w in item["choices"]])
        if not test:
            sparqls.append(item["sparql"])
            # .get(): missing answers map to None rather than raising KeyError.
            answers.append(vocab["answer_token_to_idx"].get(item["answer"]))

    input_ids = tokenizer.batch_encode_plus(
        questions,
        max_length=max_question_length,
        padding=True,
        truncation=True,
    )
    source_ids = np.array(input_ids["input_ids"], dtype=np.int32)
    source_mask = np.array(input_ids["attention_mask"], dtype=np.int32)
    if not test:
        target_ids = tokenizer.batch_encode_plus(
            sparqls,
            max_length=max_sparql_length,
            padding=True,
            truncation=True,
        )
        target_ids = np.array(target_ids["input_ids"], dtype=np.int32)
    else:
        target_ids = np.array([], dtype=np.int32)
    choices = np.array(choices, dtype=np.int32)
    answers = np.array(answers, dtype=np.int32)
    return ids, source_ids, source_mask, target_ids, choices, answers


def main():
    """CLI entry point: build the answer vocabulary over all splits, dump it,
    then encode the "val" split with a BART tokenizer and pickle the arrays.

    Required flags: --input_dir (holds train/val/test.json), --output_dir,
    --model_name_or_path (passed to BartTokenizerFast.from_pretrained).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_dir", required=True)
    parser.add_argument("--output_dir", required=True)
    parser.add_argument("--model_name_or_path", required=True)
    args = parser.parse_args()

    print("Build kb vocabulary")
    vocab = {"answer_token_to_idx": {}}
    print("Load questions")
    # Context managers so the JSON file handles are closed promptly
    # (the original `json.load(open(...))` leaked them).
    with open(os.path.join(args.input_dir, "train.json")) as f:
        train_set = json.load(f)
    with open(os.path.join(args.input_dir, "val.json")) as f:
        val_set = json.load(f)
    with open(os.path.join(args.input_dir, "test.json")) as f:
        test_set = json.load(f)
    # Assign a contiguous index to every distinct choice token across splits.
    for question in chain(train_set, val_set, test_set):
        for a in question["choices"]:
            if a not in vocab["answer_token_to_idx"]:
                vocab["answer_token_to_idx"][a] = len(vocab["answer_token_to_idx"])

    # makedirs(exist_ok=True) also creates missing parents; os.mkdir would
    # fail if any parent of output_dir does not exist.
    os.makedirs(args.output_dir, exist_ok=True)
    fn = os.path.join(args.output_dir, "vocab.json")
    print("Dump vocab to {}".format(fn))
    with open(fn, "w") as f:
        json.dump(vocab, f, indent=2)
    for k in vocab:
        print("{}:{}".format(k, len(vocab[k])))

    tokenizer = BartTokenizerFast.from_pretrained(args.model_name_or_path)
    for name, dataset in zip(("train", "val", "test"), (train_set, val_set, test_set)):
        # NOTE(review): only the "val" split is encoded; "train" and "test"
        # are deliberately skipped (the original "myadd" marker).
        if name in ["train", "test"]:
            continue

        print("Encode {} set".format(name))
        outputs = encode_dataset(dataset, vocab, tokenizer, split=name)
        assert len(outputs) == 6
        # Persist the raw ids separately as JSON; the handle was leaked before.
        with open(os.path.join(args.output_dir, "{}-ids.json".format(name)), "w") as f:
            json.dump(outputs[0], f)
        outputs = outputs[1:]
        print(
            "shape of input_ids of questions, attention_mask of questions, input_ids of sparqls, choices and answers:"
        )
        # The message above previously printed nothing after it — show shapes.
        for o in outputs:
            print(getattr(o, "shape", None))
        with open(os.path.join(args.output_dir, "{}.pt".format(name)), "wb") as f:
            for o in outputs:
                pickle.dump(o, f)


if __name__ == "__main__":
    main()
