import argparse
import ast
import json
import os
import pickle as pkl
import re
from collections import defaultdict

import nltk

# import matplotlib.pyplot as plt

# Train/test/dev: 18989/2000/2000


def build_mid2ent(in_dir):
    """Build a mapping from Freebase MIDs to answer surface names.

    Harvests the answers of the train and dev splits of ComplexWebQuestions.
    MIDs are converted from dotted form (``m.02mjmr``) to path form
    (``/m/02mjmr``).

    Parameters
    ----------
    in_dir : str
        Directory containing ``ComplexWebQuestions_train.json`` and
        ``ComplexWebQuestions_dev.json``.

    Returns
    -------
    dict
        Maps "/m/..."-style ids to a one-element list holding the answer
        text, e.g. ``{"/m/02mjmr": ["Barack Obama"]}``.
    """
    mid2ent = {}
    with open(os.path.join(in_dir, "ComplexWebQuestions_train.json"), "r") as f:
        train_split = json.load(f)

    with open(os.path.join(in_dir, "ComplexWebQuestions_dev.json"), "r") as f:
        dev_split = json.load(f)

    for dct in train_split + dev_split:
        for ans in dct["answers"]:
            # Skip answer entries without text; use `is not None`, not `!= None`.
            if ans["answer"] is not None:
                mid2ent["/" + ans["answer_id"].replace(".", "/")] = [ans["answer"]]

    # These are exactly the MIDs mentioned by dataset answers (~5774 entries).
    return mid2ent


def generate_answer_nl(in_dir, out_dir):
    """Resolve each subgraph's answer-node ids to natural-language names.

    Reads ``subgraph_answer.txt`` (triples separated by ``<t>``, with answer
    nodes tagged ``￨A`` and ordinary nodes ``￨O``), collects the integer ids
    of the answer nodes per line, and maps them through ``entity_dict.txt``
    and the merged MID->name dictionaries.

    Parameters
    ----------
    in_dir : str
        Directory holding ``entity_dict.txt``, ``subgraph_answer.txt``,
        ``mid2ents.pkl``, ``mid2ents_extra.pkl`` and the CWQ json files.
    out_dir : str
        Unused here; kept for signature parity with the other stages.

    Returns
    -------
    list
        One ``[names, mids]`` pair per subgraph line, where ``names`` are the
        resolved entity names and ``mids`` the "/m/..."-style ids.
    """
    with open(os.path.join(in_dir, "entity_dict.txt"), "r") as f:
        entity_dict = f.readlines()

    with open(os.path.join(in_dir, "subgraph_answer.txt"), "r") as f:
        subgraph_answer = f.readlines()

    ans_list = []
    for line in subgraph_answer:
        # Drop the "ordinary node" tag, keep the "answer node" tag as "|A".
        line = line.replace("￨O", "").replace("￨A", "|A")
        ans = []
        for triple in line.split("<t>"):
            subj, _pred, obj = triple.strip().split()
            # NOTE(review): [:-3] strips three chars but "|A" is only two —
            # this assumes an extra trailing char in the token; confirm the
            # data format before changing.
            if "|A" in subj:
                ans.append(int(subj[:-3]))
            if "|A" in obj:
                ans.append(int(obj[:-3]))
        ans_list.append(list(set(ans)))

    # entity_dict.txt: "<int id>\t<dotted mid>" per line -> {id: "/m/..."}.
    new_entity_dict = {}
    for line in entity_dict:
        k, v = line.strip().split("\t")
        new_entity_dict[int(k)] = "/" + v.replace(".", "/")

    mid2ent_hand = build_mid2ent(in_dir)

    with open(os.path.join(in_dir, "mid2ents.pkl"), "rb") as f3:
        mid2ents = pkl.load(f3)

    with open(os.path.join(in_dir, "mid2ents_extra.pkl"), "rb") as f4:
        mid2ents_extra = pkl.load(f4)

    merge_mid2ents = {**mid2ent_hand, **mid2ents, **mid2ents_extra}
    print("Num of merged mid2ents: {}".format(len(merge_mid2ents)))

    new_ans_list = []
    miss1 = 0  # answer ids equal to 0 (placeholder node)
    miss2 = 0  # ids whose MID has no known surface name
    cnt = 0  # total answer ids seen
    cnt_tmp_ans = 0  # subgraphs for which no answer name could be resolved
    for ans in ans_list:
        cnt += len(ans)
        names = []
        mids = []
        for x in ans:
            if x == 0:
                miss1 += 1
                continue

            mids.append(new_entity_dict[x])
            if new_entity_dict[x] in merge_mid2ents:
                names.append(merge_mid2ents[new_entity_dict[x]][0])
            else:
                miss2 += 1
        new_ans_list.append([names, mids])
        if not names:
            cnt_tmp_ans += 1

    print("generate_answer_nl")
    print("miss1", miss1)
    print("miss2", miss2)
    print("cnt", cnt)
    print("cnt_tmp_ans", cnt_tmp_ans)
    return new_ans_list


def build_input_seq(in_dir, out_dir):
    """Linearize each subgraph into a single whitespace-joined token string.

    Reads ``subgraph.txt`` (one subgraph per line, triples separated by
    ``<t>``), renames every MID (``m.*`` / ``g.*``) to its entity surface
    name via the merged mid->name dictionaries, normalizes predicates to
    their last dotted segment with underscores turned into spaces, and
    flattens the renamed triples into one string per line.

    Parameters
    ----------
    in_dir : str
        Directory holding ``subgraph.txt``, ``mid2ents.pkl``,
        ``mid2ents_extra.pkl`` and the CWQ json files.
    out_dir : str
        Unused here; kept for signature parity with the other stages.

    Returns
    -------
    list of str
        One linearized sequence per subgraph line.
    """
    mid2ent_hand = build_mid2ent(in_dir)

    with open(os.path.join(in_dir, "subgraph.txt"), "r") as f2:
        subgraphf = f2.readlines()

    # ~3M entities (~295 MB) plus ~200k extra entities.
    with open(os.path.join(in_dir, "mid2ents.pkl"), "rb") as f3:
        mid2ents = pkl.load(f3)

    with open(os.path.join(in_dir, "mid2ents_extra.pkl"), "rb") as f4:
        mid2ents_extra = pkl.load(f4)

    merge_mid2ents = {**mid2ent_hand, **mid2ents, **mid2ents_extra}
    print("Num of merged mid2ents: {}".format(len(merge_mid2ents)))  # ~3,394,778 total

    miss_cnt = 0  # MID occurrences with no known surface name
    all_input_seqs = []

    for line in subgraphf:
        new_triple_list = []

        for triple in line.strip().split("<t>"):
            if triple == "":
                break  # trailing separator: no more triples on this line

            elements = triple.split()
            assert "m." in elements[0] or "g." in elements[0]

            # Objects may be multi-word literals: rejoin everything after the
            # predicate instead of failing the 3-way unpack.
            if len(elements) != 3:
                subj, pred, obj = elements[0], elements[1], " ".join(elements[2:])
            else:
                subj, pred, obj = elements

            subj = "/" + subj.replace(".", "/")
            if "/m/" in subj or "/g/" in subj:
                # Original checked `subj in tmp_dict or subj in merge_mid2ents`
                # and then unconditionally indexed merge_mid2ents — a latent
                # KeyError. Look up the merged dict directly instead.
                if subj in merge_mid2ents:
                    subj_name = merge_mid2ents[subj][0]
                else:
                    miss_cnt += 1
                    subj_name = "none"  # unknown-entity placeholder
            else:
                # Non-MID subject (should not occur given the assert above).
                # Fall back to the raw token rather than silently reusing the
                # previous iteration's subj_name as the original code did.
                subj_name = subj

            # Keep only the last dotted segment of the predicate path,
            # with underscores turned into single spaces.
            pred = " ".join(pred.split(".")[-1].replace("_", " ").split())

            if "m." in obj or "g." in obj:
                obj = "/" + obj.replace(".", "/")
                if obj in merge_mid2ents:
                    obj_name = merge_mid2ents[obj][0]
                else:
                    miss_cnt += 1
                    obj_name = "none"
            else:
                # Literal object: hyphens become spaces.
                obj_name = " ".join(obj.split("-"))

            new_triple_list.extend([subj_name, pred, obj_name])

        all_input_seqs.append(" ".join(new_triple_list))

    print("build_input_seq")
    return all_input_seqs


def process_querys(in_dir, out_dir):
    """Substitute entity placeholders in each query, then lowercase and
    tokenize it with NLTK.

    ``ent_dict_list.txt`` holds one Python-literal dict per line mapping a
    placeholder string to a list whose first element is the replacement text;
    line ``i`` applies to query ``i`` of ``querys.txt``.

    Parameters
    ----------
    in_dir : str
        Directory holding ``querys.txt`` and ``ent_dict_list.txt``.
    out_dir : str
        Unused here; kept for signature parity with the other stages.

    Returns
    -------
    list of str
        One lowercased, space-joined tokenized query per input line.
    """
    with open(os.path.join(in_dir, "querys.txt"), "r") as f:
        querys = f.readlines()

    with open(os.path.join(in_dir, "ent_dict_list.txt"), "r") as f:
        ent_dict_list = f.readlines()

    new_ent_dict_list = []
    for line in ent_dict_list:
        # SECURITY: the original used eval() on file content; literal_eval
        # parses Python literals only and cannot execute code.
        try:
            parsed = ast.literal_eval(line)
        except (ValueError, SyntaxError):
            # Malformed line: substitute nothing for this query. (The original
            # bare `except` silently reused the previous line's dict here.)
            parsed = {}
        new_ent_dict_list.append(parsed)

    new_queries = []
    for idx, line in enumerate(querys):
        for k, v in new_ent_dict_list[idx].items():
            line = line.replace(k, v[0])

        new_queries.append(" ".join(nltk.word_tokenize(line.lower())))

    return new_queries


def prepare_output_data(input_seqs, answer_lists, queries, out_dir):
    """Write one JSON example per line to ``data.seq.json``.

    Each record carries the resolved answer names, the target query, a
    1-based example id, and the linearized subgraph sequence.

    Returns the number of examples written.
    """
    out_path = os.path.join(out_dir, "data.seq.json")
    n_written = 0
    with open(out_path, "w") as sink:
        for idx in range(len(input_seqs)):
            record = {
                "answers": answer_lists[idx][0],
                "outSeq": queries[idx],
                "qId": idx + 1,
                "inSeq": input_seqs[idx],
            }
            sink.write(json.dumps(record) + "\n")
            n_written += 1
    return n_written


def prepare_output_data_example_per_line(input_seqs, answer_lists, queries, out_dir):
    """Write the linearized subgraphs to ``src.txt`` and the tokenized
    queries to ``tgt.txt``, one example per line.

    ``answer_lists`` is accepted only for signature parity with
    :func:`prepare_output_data`; it is not written anywhere.

    Returns the number of source sequences written (the original returned
    ``None``; returning a count matches the sibling writer).
    """
    with open(os.path.join(out_dir, "src.txt"), "w") as outf:
        outf.writelines(seq + "\n" for seq in input_seqs)

    with open(os.path.join(out_dir, "tgt.txt"), "w") as outf:
        outf.writelines(query + "\n" for query in queries)

    return len(input_seqs)


if __name__ == "__main__":
    # CLI: read raw dataset files from --input_dir, write src/tgt pairs
    # into --output_dir.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-i",
        "--input_dir",
        default="data/mhqg-wq/raw",
        type=str,
        help="path to the input dir",
    )
    parser.add_argument(
        "-o", "--output_dir", default="./out_p1", type=str, help="path to the output dir"
    )
    args = parser.parse_args()
    in_dir = args.input_dir
    out_dir = args.output_dir

    input_seqs = build_input_seq(in_dir, out_dir)
    # Answer lists are computed (and their stats printed) but not used by
    # the per-line writer below.
    answer_lists = generate_answer_nl(in_dir, out_dir)
    queries = process_querys(in_dir, out_dir)
    assert len(input_seqs) == len(answer_lists) == len(queries)

    # prepare_output_data(input_seqs, answer_lists, queries, out_dir)
    prepare_output_data_example_per_line(input_seqs, answer_lists, queries, out_dir)
