import os.path
import time

from tqdm import tqdm

import queue
from read_data import *
from leiden import StructuralAwareLeiden, AlignAlgorithm, ParticleSwarmOptimization, OldLeiden
import leiden
import numpy as np
from semantic_similarity import BGE, TFIDF, SentenceBert, MyReferenceSimilarity, BertScore, Rouge, MyBM25, TextDistance, \
    NLTK, WordMover, BleuRT, BGEm3, BGEReranker, SIMCSE, LLMEmbed, MoverScore

import argparse
import logging

# Log to inference.log, overwriting any previous run (filemode='w').
logging.basicConfig(filename='inference.log', level=logging.INFO, filemode='w',
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Command-line configuration for the alignment experiment.
parser = argparse.ArgumentParser()
parser.add_argument("--data_file", type=str)  # dataset path, or "all" to load every built-in corpus
parser.add_argument("--bge_model", type=str, default="base")  # BGE checkpoint variant / fine-tune subdir
parser.add_argument("--align_method", type=str, choices=["greedy", "dp", "leiden", "pso", "old_leiden"], default="dp")
# One or more similarity backends; results are combined by the graph builder.
parser.add_argument("--similarity_model", type=str, nargs='+',
                    choices=["bge_m3", "bge_reranker", "bge", "tfidf", "simcse", "sentencebert", "myreference",
                             "bertscore", "bm25", "rouge", "bleu", "llm",
                             "meteor", "lcs", "jaccard", "wordmover", "moverscore", "bleurt"], default="bge")
parser.add_argument("--pso_initial", type=str, choices=["dp", "random", "greedy", "accumulate", "all"], default="all")  # PSO swarm initialization strategy
parser.add_argument("--gamma", type=float, default=2.)  # resolution parameter for graph construction
parser.add_argument("--use_community_sim", action="store_true")
parser.add_argument("--use_dp", action="store_true")
parser.add_argument("--m3_encoding_method", choices=["dense", "sparse", "colbert", "all"], default="all")  # BGE-M3 encoding head(s)
parser.add_argument("--simcse_model", choices=["unsup", "sup"], default="sup")
parser.add_argument("--use_bidirection", action="store_true")
parser.add_argument("--num_particles", type=int, default=40)  # PSO swarm size
parser.add_argument("--num_iterations", type=int, default=50)  # PSO iteration budget

args = parser.parse_args()


def precision(golds, preds, paragraphs):
    """Compute macro- and micro-averaged alignment precision.

    Args:
        golds: list of dicts mapping annotation node id -> gold community id.
        preds: list of dicts mapping annotation node id -> predicted community id,
            parallel to ``golds`` (every gold key must be present).
        paragraphs: list of Paragraph objects parallel to golds/preds; only
            used to log poorly-aligned cases for manual inspection.

    Returns:
        dict with "macro" (mean per-paragraph accuracy) and "micro"
        (overall node-level accuracy). Both are 0.0 when there is nothing
        to score, instead of raising ZeroDivisionError / averaging an
        empty list.
    """
    right_scores = []
    right_num = 0
    gold_num = 0
    for gold, pred, paragraph in zip(golds, preds, paragraphs):
        if not gold:
            # Nothing to evaluate for this paragraph; skip instead of
            # dividing by zero below.
            continue
        right = sum(int(gold[node] == pred[node]) for node in gold)
        right_score = right / float(len(gold))
        # Log hard failure cases; old_leiden is a baseline whose errors
        # are not worth inspecting.
        if right_score < 0.9 and args.align_method != "old_leiden":
            logging_error(paragraph, gold, pred)
        right_scores.append(right_score)
        right_num += right
        gold_num += len(gold)
    if gold_num == 0:
        # No annotated nodes at all: report zero rather than crash.
        return {"macro": 0.0, "micro": 0.0}
    return {"macro": np.average(right_scores), "micro": right_num / float(gold_num)}


class Case:
    """Priority-queue entry ordered by descending score.

    ``queue.PriorityQueue`` pops the smallest element first; defining
    ``__lt__`` as "has the greater score" makes the queue behave as a
    max-heap, so the highest-scoring case is retrieved first.
    """

    def __init__(self, score, item):
        self.score = score
        self.item = item

    def __lt__(self, other):
        # Deliberately inverted: higher score sorts as "smaller".
        return other.score < self.score


def case_study(gold, cand_1, cand_2):
    """Return how many more gold assignments cand_1 matches than cand_2.

    Positive means cand_1 agrees with the gold mapping on more nodes;
    negative means cand_2 does; zero means they tie.
    """
    hits_1 = 0
    hits_2 = 0
    for node in gold:
        target = gold[node]
        if cand_1[node] == target:
            hits_1 += 1
        if cand_2[node] == target:
            hits_2 += 1
    return hits_1 - hits_2


def logging_error(paragraph: Paragraph, gold, predicted):
    """Log classics whose prediction misses some gold annotation.

    NOTE(review): the containment check is deliberately one-directional —
    a classic is logged only when a gold annotation is absent from the
    prediction; extra predicted annotations alone never trigger logging.
    """

    def covers(pred_list, gold_list):
        # True when every gold annotation also appears in the prediction.
        return set(gold_list) <= set(pred_list)

    for com_id, classic in enumerate(paragraph.classics):
        gold_texts = [paragraph.annotations[an_id] for an_id in gold if gold[an_id] == com_id]
        pred_texts = [paragraph.annotations[an_id] for an_id in predicted if predicted[an_id] == com_id]
        if gold_texts and not covers(pred_texts, gold_texts):
            logger.info("text:" + classic)
            logger.info("gold:" + "。".join(gold_texts))
            logger.info("pred:" + "。".join(pred_texts))


def align(fname):
    """Run the full alignment experiment on the dataset named by *fname*.

    Loads the matching corpus, instantiates the similarity model(s)
    selected on the command line, runs the chosen alignment algorithm on
    every annotated paragraph, prints macro/micro precision plus average
    per-paragraph runtime, and dumps notable cases to case_studies.txt.
    """
    # --- dataset selection: dispatch on substrings of the file name ---
    if "all" == fname:
        data = read_sishu("data/sishu.json") + read_mengzi_liji("data/mengzi.jsonl") + read_mengzi_liji(
            "data/liji.jsonl")  + read_mengzi_liji("data/xiaojing.jsonl")
    elif "sishu" in fname:
        data = read_sishu(fname)
        bookname = "sishu"  # NOTE(review): bookname is assigned but never used below
    elif "liji" in fname:
        data = read_mengzi_liji(fname)
        bookname = "liji"
    elif "mengzi" in fname:
        data = read_mengzi_liji(fname)
        bookname = "mengzi"
    elif "zhuangzi" in fname:
        data = read_zhuangzi(fname)
        bookname = "zhuangzi"
    elif "xiaojing" in fname:
        data = read_mengzi_liji(fname)
        bookname = "xiaojing"
    else:
        raise ValueError("Unsupported file type")
    # --- similarity backends: one instance per requested model name ---
    sim_model = {}
    for sim_model_name in args.similarity_model:
        if sim_model_name == "bge":
            # --bge_model selects the checkpoint size / fine-tuned subdir.
            if args.bge_model == "base":
                sim_model[sim_model_name] = BGE("../bge-base-zh-v1.5")
            elif args.bge_model == "base-exegesis":
                sim_model[sim_model_name] = BGE("../bge-base-zh-v1.5/all_exegesis")
            elif args.bge_model == "small":
                sim_model[sim_model_name] = BGE("../bge-small-zh-v1.5")
            elif args.bge_model == "small-exegesis":
                sim_model[sim_model_name] = BGE("../bge-small-zh-v1.5/all_exegesis")
            elif args.bge_model == "large":
                sim_model[sim_model_name] = BGE("../bge-large-zh-v1.5")
            else:
                # Any other value is treated as a subdirectory of the large model.
                sim_model[sim_model_name] = BGE(os.path.join("../bge-large-zh-v1.5", args.bge_model))
        elif sim_model_name == "bge_m3":
            if args.bge_model == "base":
                sim_model[sim_model_name] = BGEm3("../bge-m3", args.m3_encoding_method)
            else:
                sim_model[sim_model_name] = BGEm3(os.path.join("../bge-m3", args.bge_model),
                                                  args.m3_encoding_method)
        elif sim_model_name == "bge_reranker":
            if args.bge_model == "base":
                sim_model[sim_model_name] = BGEReranker("../bge-reranker-v2-m3")
            else:
                sim_model[sim_model_name] = BGEReranker(os.path.join("../bge-reranker-v2-m3", args.bge_model))
        elif sim_model_name == "llm":
            sim_model[sim_model_name] = LLMEmbed("../llm-embedder")
        elif sim_model_name == "tfidf":
            sim_model[sim_model_name] = TFIDF()
        elif sim_model_name == "simcse":
            sim_model[sim_model_name] = SIMCSE(
                "roberta-" + args.simcse_model + "-simcse-classical-base")
        elif sim_model_name == "sentencebert":
            sim_model[sim_model_name] = SentenceBert(path='plms/distiluse-base-multilingual-cased-v1')
        elif sim_model_name == "bertscore":
            sim_model[sim_model_name] = BertScore("roberta-classical-chinese-base-char")
        elif sim_model_name == "rouge":
            sim_model[sim_model_name] = Rouge()
        elif sim_model_name == "lcs":
            sim_model[sim_model_name] = TextDistance("lcs")
        elif sim_model_name == "jaccard":
            sim_model[sim_model_name] = TextDistance("jaccard")
        elif sim_model_name == "myreference":
            sim_model[sim_model_name] = MyReferenceSimilarity("roberta-" + args.simcse_model + "-simcse-classical-base")
        elif sim_model_name == "bm25":
            sim_model[sim_model_name] = MyBM25()
        elif sim_model_name == "wordmover":
            sim_model[sim_model_name] = WordMover("plms/sgns.sikuquanshu.word")
        elif sim_model_name == "moverscore":
            sim_model[sim_model_name] = MoverScore()
        elif sim_model_name == "bleu":
            sim_model[sim_model_name] = NLTK("bleu")
        elif sim_model_name == "meteor":
            sim_model[sim_model_name] = NLTK("meteor")
        elif sim_model_name == "bleurt":
            sim_model[sim_model_name] = BleuRT("plms/BLEURT-20")
        else:
            raise Exception("wrong model type")
    # --- per-paragraph alignment loop ---
    golds = []
    preds = []
    paragraphs = []
    study_cases = queue.PriorityQueue()  # Case orders by descending score
    start_time = time.time()
    for pid in tqdm(range(len(data))):
        paragraph = data[pid]
        if len(paragraph.annotation_ids) == 0:
            # Skip paragraphs without annotations — nothing to align or score.
            continue
        graph, all_sim = leiden.build_graph_from_doc(paragraph.classics, paragraph.annotations, gamma=args.gamma,
                                                     sim_model=sim_model)
        # Construct the alignment processor for the selected method.
        if args.align_method == "dp" or args.align_method == "greedy":
            process = AlignAlgorithm(graph, classics=paragraph.classics, annotations=paragraph.annotations)
        elif args.align_method == "leiden":
            process = StructuralAwareLeiden(graph, classics=paragraph.classics, annotations=paragraph.annotations, sim_model=sim_model,
                                            use_community_sim=args.use_community_sim, use_bidirection=args.use_bidirection,
                                            use_dp=args.use_dp)
        elif args.align_method == "old_leiden":
            process = OldLeiden(graph, classics=paragraph.classics, annotations=paragraph.annotations)
        elif args.align_method == "pso":
            process = ParticleSwarmOptimization(graph, classics=paragraph.classics, annotations=paragraph.annotations,
                                                num_particles=args.num_particles, num_iterations=args.num_iterations,
                                                sim_model=sim_model, use_community_sim=args.use_community_sim)
            # NOTE(review): PSO runs forward() here; the chain below is for
            # the other methods and leaves this result untouched.
            predicted_communities = process.forward(all_sim, args.pso_initial)
        if args.align_method == "dp" or args.align_method == "old_leiden":
            predicted_communities = process.forward(all_sim)
        elif args.align_method == "greedy":
            predicted_communities = process.greedy(paragraph.classics, paragraph.annotations, all_sim)
            gold_communities = process.position_to_community(paragraph.annotation_ids)
            study_cases.put(Case(0, {"classics": paragraph.classics, "annotations": paragraph.annotations,
                                              "gold": gold_communities,
                                              "leiden": predicted_communities}))
        elif args.align_method == "leiden":
            # Compare the structural-aware result against the original
            # (pre-refinement) communities; queue cases where it wins.
            predicted_communities, original_communities = process.forward(all_sim)
            predicted_ids = process.community_to_id(predicted_communities)
            dp_ids = process.community_to_id(original_communities)
            case_score = case_study(paragraph.annotation_ids, predicted_ids, dp_ids)
            if case_score > 0:
                gold_communities = process.position_to_community(paragraph.annotation_ids)
                study_cases.put(Case(case_score, {"classics": paragraph.classics, "annotations": paragraph.annotations,
                                                  "gold": gold_communities,
                                                  "leiden": predicted_communities, "dp": original_communities}))

        predicted_ids = process.community_to_id(predicted_communities)
        golds.append(paragraph.annotation_ids)
        preds.append(predicted_ids)
        paragraphs.append(paragraph)
    end_time = time.time()
    # --- scoring and reporting ---
    precision_score = precision(golds, preds, paragraphs)
    print("macro precision %.4f; micro precision %.4f" % (precision_score["macro"], precision_score["micro"]))
    print("average time cost %.4f"%((end_time - start_time)/len(golds)))
    # Dump queued cases, best (highest score) first.
    # NOTE: nested same-quote f-strings below require Python 3.12+ (PEP 701).
    with open("case_studies.txt", "w") as writer:
        while not study_cases.empty():
            item = study_cases.get()
            writer.write(f"Priority: {item.score}\n")
            writer.write(f"classic:\n{"||".join([str(i) + "-" + s for i, s in enumerate(item.item["classics"])])}\n")
            writer.write(
                f"annotation:\n{"||".join([str(i+len(item.item["classics"])) + "-" + s for i, s in enumerate(item.item["annotations"])])}\n")
            writer.write(f"gold:\n{"||".join([str(i) for i in item.item["gold"]])}\n")
            writer.write(f"leiden:\n{"||".join([str(i) for i in item.item["leiden"]])}\n")
            #writer.write(f"dp:\n{"||".join([str(i) for i in item.item["dp"]])}\n")
            writer.write("\n\n")


if __name__ == "__main__":
    # Record the full run configuration, then kick off the experiment.
    logger.info(" ".join(f"{key}:{value}" for key, value in vars(args).items()))
    align(args.data_file)
