import numpy as np
import torch
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from utils import drugseed_bert_ids, drug_names_bert_ids
from gensim.models import Word2Vec
from utils import drug_names
from transformers import BertTokenizer
import random
import copy
from tqdm import tqdm


def add_muti_mask_vec(mask_logits, tokenizer, seedonly=True):
    """Append averaged-logit columns for multi-token drug names.

    For every drug whose mapped id lies beyond the tokenizer's base
    vocabulary (i.e. the drug name is split into several word pieces),
    average the logit columns of its pieces and concatenate that mean
    column onto ``mask_logits``, in the id table's iteration order.

    Args:
        mask_logits: tensor indexed as (batch, vocab) with logits at the
            [MASK] position — shape assumed from the column indexing;
            TODO(review) confirm against the caller.
        tokenizer: HuggingFace BertTokenizer used to split drug names.
        seedonly: when True use the seed-drug id table, otherwise the
            full drug-name table.

    Returns:
        ``mask_logits`` with one extra column appended per multi-token drug.
    """
    # NOTE(review): ids appended after the base vocab normally start at
    # len(tokenizer); the strict `>` below skips an id equal to
    # len(tokenizer) — confirm against how the tables in utils are built.
    table = drugseed_bert_ids if seedonly else drug_names_bert_ids
    for name, mapped_id in table.items():
        if mapped_id <= len(tokenizer):
            continue
        piece_ids = tokenizer(name)["input_ids"][1:-1]  # strip [CLS]/[SEP]
        piece_cols = torch.stack([mask_logits[:, pid] for pid in piece_ids])
        mean_col = piece_cols.mean(dim=0).unsqueeze(1)
        mask_logits = torch.cat([mask_logits, mean_col], dim=1)
    return mask_logits


def mask_trunk(sent, window=100):
    """Truncate a whitespace-tokenized sentence around its [MASK] token.

    A long sentence can exceed the model's input-id limit after BERT
    tokenization, which risks cutting off the [MASK] token; keeping only
    ``window`` words on each side of [MASK] guarantees it survives.

    Args:
        sent: sentence containing "[MASK]" as a whitespace-separated word.
        window: number of words kept on each side of [MASK]
            (default 100, matching the original fixed width).

    Returns:
        The truncated sentence, words re-joined with single spaces.

    Raises:
        ValueError: if "[MASK]" is not a whitespace-separated word of sent.
    """
    words = sent.split()
    mask_idx = words.index("[MASK]")
    kept = words[max(0, mask_idx - window): mask_idx + window]
    return " ".join(kept)


class BoxPlot:
    """Horizontal seaborn box-plot of rank distributions grouped by type.

    ``datatemplate`` is a dict holding parallel sequences under the keys
    "type", "ranks" and "epoch".
    """

    def __init__(self, datatemplate):
        super(BoxPlot, self).__init__()
        self.datatemplate = datatemplate

    def draw(self, **kwargs):
        """Render the box-plot and display it with ``plt.show()``."""
        frame = pd.DataFrame(
            data={
                "type": self.datatemplate["type"],
                "ranks": self.datatemplate["ranks"],
                "Epoch": self.datatemplate["epoch"],
            }
        )
        fig, axis = plt.subplots(figsize=(8, 4), dpi=400)
        sns.boxplot(y="type", x="ranks", data=frame, whis=[0, 100],
                    width=.6, palette="vlag", orient="h")
        # Tweak the visual presentation: vertical grid lines make the rank
        # values readable; the category labels already name the rows.
        axis.xaxis.grid(True)
        axis.set(ylabel="")
        sns.despine(trim=True, left=True)
        plt.tight_layout()
        plt.show()


def prepare_traincorp(filepath, isusew2v=False):
    """Build the step-3 MLM training set: [single-mask, label, 50%-mask, label].

    Loads a pickled corpus of ``[masked_sentence, masked_word]`` pairs and,
    for each pair, emits one sample per word-piece of the masked word:
    the single-[MASK] sentence, the sentence with that word piece filled
    in, a version of the full sentence with ~50% of its tokens masked,
    and the full sentence itself.

    Args:
        filepath: path to a pickle of [["sentence with [MASK]", "word"], ...].
        isusew2v: when True, first keep only corpus entries whose masked
            word is Word2Vec-similar to the average drug vector without
            itself being a drug name (see ``_w2v_filter``).

    Returns:
        List of [masked_sentence, filled_label, half_masked_sentence,
        full_sentence] quadruples.
    """
    # NOTE(security): pickle.load can execute arbitrary code — only use on
    # files produced by this project.
    with open(filepath, "rb") as fh:
        corp = pickle.load(fh)  # [["sentence mask str", "mask"], ...]

    if isusew2v:
        corp = _w2v_filter(corp)

    outputs = []
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    for smp in tqdm(corp):
        sent = smp[0].replace("[MASK]", smp[1])
        li = smp[0].split(" ")
        tokens_ = tokenizer.tokenize(sent)
        if len(tokens_) > 254:  # leave room for [CLS]/[SEP] in a 256 budget
            continue
        targetidx = li.index("[MASK]")
        msksent = smp[0]
        # One label sentence per word-piece of the masked word: the piece
        # is substituted at the [MASK] position.
        labels = []
        for piece in tokenizer.tokenize(smp[1]):
            target = list(li)  # shallow copy suffices: strings are immutable
            target[targetidx] = piece
            labels.append(tokenizer.convert_tokens_to_string(target))

        for label in labels:
            # BUG FIX: mask a fresh copy each iteration. The original
            # mutated tokens_ in place, so [MASK]s accumulated across
            # label iterations and later samples carried far more than
            # the intended 50% masking.
            masked_tokens = list(tokens_)
            for idx in random.sample(range(len(masked_tokens)),
                                     int(len(masked_tokens) * 0.5)):
                masked_tokens[idx] = "[MASK]"
            tailmsk = tokenizer.convert_tokens_to_string(masked_tokens)
            # Keep only samples whose masked/unmasked pairs re-tokenize to
            # equal lengths, so token-level labels stay aligned.
            if (len(tokenizer.tokenize(msksent)) != len(tokenizer.tokenize(label))
                    or len(tokenizer.tokenize(tailmsk)) != len(tokenizer.tokenize(sent))):
                continue
            outputs.append([msksent, label, tailmsk, sent])
    return outputs


def _w2v_filter(corp):
    """Keep corpus entries whose masked word is drug-like per Word2Vec.

    Trains Word2Vec on the unmasked sentences, averages the vectors of
    all drug names present in the vocabulary, and keeps entries whose
    masked word is among the 1000 nearest neighbours of that mean vector
    without itself being a drug name.
    """
    vector_size = 100
    inputtexts = list(set(each[0].replace("[MASK]", each[1]) for each in corp))
    inputtexts = [text.split(" ") for text in inputtexts]
    w2vmodel = Word2Vec(sentences=inputtexts, vector_size=vector_size,
                        window=5, min_count=1, workers=4)
    cnt = 0
    drugvec = np.zeros(vector_size)
    for drug_name in drug_names:
        # key_to_index is a dict: O(1) lookup vs scanning index_to_key.
        if drug_name in w2vmodel.wv.key_to_index:
            drugvec += w2vmodel.wv[drug_name]
            cnt += 1
    if cnt == 0:
        # No drug name made it into the vocabulary: nothing to anchor the
        # similarity search on, so keep the corpus unchanged (the original
        # code would have divided by zero here).
        return corp
    drugvec /= cnt
    sims = w2vmodel.wv.similar_by_vector(drugvec, topn=1000)
    drug_set = set(drug_names)  # O(1) membership inside the comprehensions
    pick = {word for word, _score in sims if word not in drug_set}
    return [each for each in corp if each[1] in pick]
