from transformers import AutoConfig, AutoModelForSequenceClassification, MBartForConditionalGeneration, XLMRobertaTokenizer, MBartTokenizer
from train_generate import load_data, load_ckgc_data, mbart_lang_to_id
from utils.io import read_pkl
import torch
from torch.nn.utils.rnn import pad_sequence
import os
import torch.nn.functional as F
import torch.nn as nn
from utils.ConstractiveLoss import ContrastiveLossS1
from utils.mine_utils import init_seed
from utils.evaluation import f1_score, eval_bleu, sentence_bleu

def qu0con():
    """Strip zero padding from two aligned toy batches, concatenate row-wise,
    and re-pad the results to a common length.

    Fix: the original built ``out`` and silently discarded it; the padded
    tensor is now returned (callers that ignored the old ``None`` return are
    unaffected).

    Returns:
        LongTensor of shape [3, max_len] where each row is the non-zero
        entries of ``a``'s row followed by the non-zero entries of ``b``'s
        row, right-padded with 0.
    """
    a = torch.tensor([
        [1, 2, 3, 0, 0],
        [4, 0, 0, 0, 0],
        [5, 6, 7, 8, 9]
    ])
    b = torch.tensor([
        [1, 2, 3, 0, 0],
        [4, 5, 0, 0, 0],
        [5, 6, 7, 8, 0]
    ])
    merged = []
    for row_a, row_b in zip(a, b):
        # drop the zero padding from each row, then join the two rows
        merged.append(torch.cat([row_a[row_a.ne(0)], row_b[row_b.ne(0)]]))
    # re-pad the variable-length rows back into a rectangular tensor
    return pad_sequence(merged, batch_first=True, padding_value=0)

def conloss():
    """Smoke test: feed random projected embeddings through
    ContrastiveLossS1 and check that gradients flow via backward()."""
    batch, pool, emb = 3, 5, 10
    proj = nn.Linear(1, emb)
    feats = proj(torch.randn(batch * pool, 1))  # [batch * pool, emb]
    criterion = ContrastiveLossS1(batch)
    loss = criterion(feats.reshape(batch, pool, emb))
    print(loss)
    loss.backward()
    print(loss)


def reiloss():
    """Smoke test: score random features, gather the scores at sampled
    rank positions, reduce to a scalar, and backprop through it."""
    batch, pool, top_k = 2, 5, 3
    feats = torch.randn(batch, pool, 10)
    scorer = nn.Linear(10, 1)
    logits = scorer(feats).squeeze(-1)              # [batch, pool]
    rank = torch.randint(0, pool, (batch, top_k))   # [batch, top_k]
    picked = torch.gather(logits, dim=1, index=rank)  # [batch, top_k]
    # sum over the batch dim, then average over the k picks -> scalar
    reduced = picked.sum(0).mean(0)
    print(reduced)
    reduced.backward()


def reinforcement():
    """Sketch of a REINFORCE-style objective: a clamped PPL-based reward
    multiplied by the attention-weighted NLL of sampled rank candidates."""
    log_softmax = nn.LogSoftmax(-1)
    batch, pool, cand = 4, 8, 3
    ppl_history = []

    logits = torch.randn(batch, pool)
    rank = torch.randint(0, pool, (batch, cand))
    # negative log-likelihood of the sampled candidates -> [batch, cand]
    nll = -torch.gather(log_softmax(logits), dim=1, index=rank)

    gen_loss = torch.randn(batch, 10).sum(-1)
    token_counts = torch.tensor([5, 7, 10, 4])
    ppl = torch.exp(gen_loss / token_counts)
    ppl_history.append(ppl.mean().item())
    # reward = running-mean PPL minus this batch's PPL, clipped to [-2, 2]
    reward = torch.tensor(sum(ppl_history) / len(ppl_history)) - ppl
    torch.clamp_(reward, min=-2., max=2.)

    attn = torch.softmax(torch.randn(batch, cand), dim=-1)
    loss = torch.mul(reward, torch.mul(attn, nll).sum(-1)).mean(-1)
    print(loss)


def test_xlm():
    """Scratch check that the InfoXLM ranker and the mBART generator load,
    accept the added special tokens, and complete a forward pass.

    Fixes over the original:
      * removed the second ``MBartTokenizer.from_pretrained`` call, which
        discarded the special tokens just added and left the tokenizer out
        of sync with the resized embedding matrix;
      * ``gen_model()`` was called with no arguments, which raises — it now
        receives real encoder inputs and labels.

    NOTE(review): relies on local checkpoints at hard-coded Windows paths;
    only runnable on that machine.
    """
    a = torch.tensor([
        [1, 2, 3, 0, 0],
        [4, 0, 0, 0, 0],
        [5, 6, 7, 8, 9]
    ])
    rank_model_name = r"D:\0MData\pretrain models\infoxlm-base"
    rank_tokenizer = XLMRobertaTokenizer.from_pretrained(rank_model_name)

    config = AutoConfig.from_pretrained(rank_model_name)
    config.num_labels = 1  # one ranking score per sequence
    model = AutoModelForSequenceClassification.from_pretrained(rank_model_name, config=config)
    rank_tokenizer.add_tokens(['<topic>', '<wizard>', '<apprentice>', '<knowledge>', '<none>'])
    model.resize_token_embeddings(len(rank_tokenizer))

    # 0 doubles as the padding id in the toy batch above
    output = model(input_ids=a, attention_mask=a.ne(0).long().detach(), output_hidden_states=True, return_dict=True)

    gen_model_name = r"D:\0MData\pretrain models\mbart-large-cc25"
    gen_config = AutoConfig.from_pretrained(gen_model_name)
    gen_model = MBartForConditionalGeneration.from_pretrained(gen_model_name, config=gen_config)
    gen_tokenizer = MBartTokenizer.from_pretrained(gen_model_name)
    gen_tokenizer.add_tokens(['<topic>', '<wizard>', '<apprentice>', '<knowledge>', '<none>'])
    gen_model.resize_token_embeddings(len(gen_tokenizer))

    sens = [
        "zh_CN 你好，我的名字是谢尔顿。",
        "en_XX Hello, my name is Sheldon."
    ]
    o1 = gen_tokenizer.encode(sens[0])
    o2 = gen_tokenizer.encode(sens[1])
    o3 = rank_tokenizer.encode(sens[0])
    o4 = rank_tokenizer.encode(sens[1])

    # run a real forward pass; labels double as decoder targets
    batch = gen_tokenizer(sens, padding=True, return_tensors="pt")
    output = gen_model(input_ids=batch["input_ids"],
                       attention_mask=batch["attention_mask"],
                       labels=batch["input_ids"])
    # patch in the project-wide language-code mapping used by train_generate
    gen_tokenizer.lang_code_to_id = mbart_lang_to_id

    print("ok")

def test_mbart():
    """Probe mBART cross-attention over two 5-token spans of the encoder
    input, pooled (mean and max) over the non-padded target tokens."""
    from transformers import MBartTokenizer, MBartForConditionalGeneration
    gen_model_name = r"D:\0MData\pretrain models\mbart-large-cc25"
    model = MBartForConditionalGeneration.from_pretrained(gen_model_name)
    tokenizer = MBartTokenizer.from_pretrained(gen_model_name)

    inputs = torch.randint(2, 10, (4, 20))
    labels = torch.randint(2, 10, (4, 10))
    # sprinkle padding (id 1) into the sources...
    for row, spans in ((0, [(2, 5), (8, 10)]),
                       (1, [(2, 5), (9, 10)]),
                       (2, [(3, 5), (7, 10)])):
        for lo, hi in spans:
            inputs[row, lo:hi] = 1
    # ...and pad the targets from a per-row start position
    for row, start in ((0, 6), (1, 7), (2, 6), (3, 8)):
        labels[row, start:] = 1

    output = model(input_ids=inputs, attention_mask=inputs.ne(1).long(), decoder_input_ids=labels,
             decoder_attention_mask=labels.ne(1).long(), output_attentions=True)
    # average over layers, then heads -> [batch_size, target_len, source_len]
    pre_attentions = torch.stack(output.cross_attentions, dim=0).mean(0).mean(1)
    mask = labels.ne(1).type_as(pre_attentions)  # [batch_size, target_len]
    mean_attentions = []
    max_attentions = []
    for i in range(2):
        # attention mass on the i-th 5-token source span, per target token
        span_mass = pre_attentions[:, :, i * 5: (i + 1) * 5].sum(-1) * mask
        mean_attentions.append(span_mass.sum(-1) / mask.sum(-1))
        max_attentions.append(span_mass.max(-1).values)
    eattentions = torch.softmax(torch.stack(mean_attentions, dim=-1), dim=-1)  # [batch_size, k]
    aattentions = torch.softmax(torch.stack(max_attentions, dim=-1), dim=-1)  # [batch_size, k]

    print(3)

def calppl():
    """Sketch: per-example perplexity from a summed generation loss, then
    weight a [batch, 3] ranking loss by that perplexity."""
    batch, seq_len = 4, 10
    gen_loss = torch.randn(batch, seq_len).sum(-1)
    token_counts = torch.tensor([5, 7, 10, 4])
    # ppl = exp(mean token loss); token counts vary per example
    perplexity = torch.exp(gen_loss / token_counts)

    rank_loss = torch.randn(batch, 3)
    weighted = perplexity.unsqueeze(1).expand(-1, 3) * rank_loss

import torch
from torch.utils.data import DataLoader, Dataset
class TestDataset(Dataset):
    """Tiny in-memory dataset of (random 10-dim feature, label tensor [1])
    pairs, used to exercise DataLoader plumbing."""

    def __init__(self, num=10):
        super().__init__()
        self.sample = [
            [torch.randn(10), torch.tensor([1]).long()] for _ in range(num)
        ]

    def __len__(self):
        return len(self.sample)

    def __getitem__(self, index):
        return self.sample[index]

from accelerate import Accelerator
from transformers import AdamW
from tqdm import tqdm
def test_loader():
    """Check iterating two accelerate-prepared dataloaders in lockstep.

    Fix: the original passed the already-prepared model and optimizer
    through ``accelerator.prepare`` a second time just to wrap the second
    dataloader; now only the dataloader is prepared in the second call.
    """
    accelerator = Accelerator()
    dataloader1 = DataLoader(TestDataset(15), batch_size=3, shuffle=True)
    dataloader2 = DataLoader(TestDataset(10), batch_size=4, shuffle=True)
    model = nn.Linear(10, 2)
    optimizer = AdamW(model.parameters(), lr=1e-5)
    model, optimizer, data_loader1 = accelerator.prepare(model, optimizer, dataloader1)
    data_loader2 = accelerator.prepare(dataloader2)
    for j in range(10):
        # zip truncates each epoch to the shorter loader
        tk0 = tqdm(zip(data_loader1, data_loader2), total=min(len(data_loader1), len(data_loader2)))
        for batch1, batch2 in tk0:
            print(batch1)
            print(batch2)

# English dialogue + English knowledge
# Directly compute F1 between the response and the knowledge
# context+knowledge -> ground-truth response's ppl
# context+knowledge -> attention score

# response+knowledge -> score
# context+knowledge -> score

# Evaluate the model's performance after warm-up

from tqdm import tqdm
from utils.io import read_file, read_pkl
def load_datam(context_file, pool_pkl, knowledge_file):
    """Load dialogue contexts, knowledge pools and responses; report how
    often the pool's first candidate is also the best one by F1.

    Args:
        context_file: tab-separated lines ``topic \\t turn1 ... \\t response``.
        pool_pkl: pickle holding, per example, the list of knowledge indices.
        knowledge_file: one knowledge sentence per line; each sentence is
            assumed to start with a topic tag before the first tab
            (TODO confirm against the data format).

    Returns:
        Tuple ``(contexts, knowledgess, responses)``: tagged, lower-cased
        context turns; the knowledge candidates per example; lower-cased
        responses.

    Fixes over the original:
      * the diagnostic print fired when ``ft != st`` but claimed equality;
      * accuracy printing is guarded against division by zero.
    """
    acc1 = 0  # count: best-F1 candidate is the pool's first entry
    acc2 = 0  # count: best candidate shares the first entry's topic tag
    print(f"load context from {context_file}, load pool from {pool_pkl}, load knowledge from {knowledge_file}.")
    con_contexts = read_file(context_file)
    knowledges = read_file(knowledge_file)
    pool_ids = read_pkl(pool_pkl)
    tag = ["<wizard> ", "<apprentice> "]
    contexts = []
    responses = []
    knowledgess = []
    num = 0
    for idx, conc in enumerate(tqdm(con_contexts)):
        con = conc.split("\t")
        topic = con[0]
        response = con[-1]
        context = con[1:-1]
        # alternate speaker tags; the parity of the turn count fixes who starts
        flag = 0 if len(context) % 2 == 0 else 1
        for i in range(1, len(context) + 1):
            con[i] = (tag[flag] + con[i] + " </s>").lower()
            flag = (flag + 1) % 2
        con[0] = f"<topic> {topic} </s>"

        contexts.append(con[:-1])
        responses.append(response.lower())

        pool_id = pool_ids[idx]
        knowledge = [knowledges[pid] for pid in pool_id]
        knowledgess.append(knowledge)
        # unigram F1 of each candidate against the gold response
        f1 = [f1_score(k, [response]) for k in knowledge]
        num += 1
        best = f1.index(max(f1))
        st = knowledge[0].split('\t')[0]     # topic tag of the first candidate
        ft = knowledge[best].split('\t')[0]  # topic tag of the best candidate
        if best == 0:
            acc1 += 1
        if ft == st:
            acc2 += 1
        if best == 0 and ft != st:
            # best candidate is the first entry, yet its topic tag differs
            print(f"{ft} != {st}")
    print(f"total examples {num}")
    if num:
        print(f"{acc1} / {num} = {acc1/num}")
        print(f"{acc2} / {num} = {acc2/num}")
    return contexts, knowledgess, responses



if __name__ == "__main__":
    # conloss()
    init_seed(123456)
    # reiloss()
    # reinforcement()
    # calppl()
    # test_mbart()
    # test_xlm()
    # test_loader()
    data_path = "./wow/seen/"
    load_datam(f"{data_path}/en.txt", data_path + "pool.pkl", data_path + "0.txt")