import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
from transformers import AutoConfig, AutoModelForSequenceClassification, MBartForConditionalGeneration, XLMRobertaTokenizer, MBartTokenizer
import os
import random
from tqdm import tqdm
from transformers import AdamW, get_linear_schedule_with_warmup
from accelerate import Accelerator
from train_generate import load_ckgc_data, mbart_lang_to_id
from utils.evaluation import f1_score, eval_all
from utils.io import write_file, read_pkl, read_file, read_json
mbart_lang_to_lang = {'en': 'en_XX', 'es': 'es_XX', 'fr': 'fr_XX', 'zh': 'zh_CN'}
from utils.mine_utils import init_seed, random_binary, random_index
from utils.ConstractiveLoss import ContrastiveLossELI5 as ConLoss1, ContrastiveLossS1 as ConLoss2

from models import JointModel
from data import MKGCData, MKGCGenerateData, MKGCRankData


def load_data_fromone(context_file, pool_pkl, knowledge_file, knowledge_language):
    """Load dialogues paired with a single-language knowledge pool.

    Each line of ``context_file`` is tab-separated: topic, dialogue turns,
    response. Turns are tagged with alternating speaker markers and
    lower-cased; knowledge candidates are rendered as
    ``<lang_code> <topic> ... </s> ...``. Samples whose pool holds fewer
    than two knowledge entries are dropped.

    Returns (contexts, knowledgess, responses), aligned by index.
    """
    print(f"load context from {context_file}, load pool from {pool_pkl}, load knowledge from {knowledge_file}")
    raw_dialogues = read_file(context_file)
    knowledge_lines = read_file(knowledge_file)
    pool_ids = read_pkl(pool_pkl)
    speaker_tags = ["<wizard> ", "<apprentice> "]
    lang_code = mbart_lang_to_lang[knowledge_language]
    contexts, responses, knowledgess = [], [], []
    for idx, raw in enumerate(raw_dialogues):
        fields = raw.split("\t")
        topic = fields[0]
        response = fields[-1]
        num_turns = len(fields) - 2  # everything between topic and response
        # Alternate speaker tags so the turn right before the response is the apprentice.
        speaker = 0 if num_turns % 2 == 0 else 1
        for turn in range(1, num_turns + 1):
            fields[turn] = (speaker_tags[speaker] + fields[turn] + " </s>").lower()
            speaker = 1 - speaker
        fields[0] = f"<topic> {topic} </s>"

        pool = pool_ids[idx]
        if len(pool) < 2:  # drop samples that only have a single knowledge entry
            continue
        knowledge = []
        for pid in pool:
            parts = knowledge_lines[pid].split("\t")
            knowledge.append(f"{lang_code} <topic> " + parts[0].lower() + " </s> " + parts[1].lower())

        contexts.append(fields[:-1])
        responses.append(response.lower())
        knowledgess.append(knowledge)
    print(f"{knowledge_language} total examples {len(contexts)}")
    return contexts, knowledgess, responses


def load_data_fromtwo(context_file, pool_pkls, knowledge_files, knowledge_languages, align_file,
                      strict_align=True, align_threshold=0.25, random_delete=False, delete_threshold=0.7):
    """Load dialogues paired with an interleaved bilingual knowledge pool.

    Args:
        context_file: tab-separated dialogues (topic, turns..., response).
        pool_pkls: two pickle files of candidate-knowledge id pools, one per language.
        knowledge_files: two knowledge files, one per language.
        knowledge_languages: the two language codes, e.g. ["en", "zh"].
        align_file: JSON mapping English topics to the other language's topics.
        strict_align: if True, keep only samples whose label knowledge is strictly
            aligned across the two knowledge bases (for later contrastive learning).
        align_threshold: minimum F1 between second-language label knowledge and the
            response for a sample to count as aligned.
        random_delete: if True, randomly drop the English label knowledge with some
            probability. strict_align and random_delete should not both be True (or
            both False), since they partly contradict each other.
        delete_threshold: probability used by random_delete.

    Returns (contexts, knowledgess, responses), aligned by index.
    """
    con_contexts = read_file(context_file)
    aligns = read_json(align_file)
    knowledges = [read_file(knowledge_file) for knowledge_file in knowledge_files]
    pool_ids = [read_pkl(pool_pkl) for pool_pkl in pool_pkls]
    print(f"load context from {context_file}, load align from {align_file}")
    print(f"load pool from {pool_pkls}, load knowledge from {knowledge_files}, knowledge language is {knowledge_languages}")

    tag = ["<wizard> ", "<apprentice> "]
    contexts = []
    responses = []
    knowledgess = []
    keep = 0  # counts samples whose English label knowledge was kept (random_delete path)
    for id, conc in enumerate(con_contexts):
        con = conc.split("\t")
        topic = con[0]
        response = con[-1]
        context = con[1:-1]
        # Alternate speaker tags so the last turn before the response is the apprentice.
        flag = 0 if len(context) % 2 == 0 else 1
        for i in range(1, len(context) + 1):
            con[i] = (tag[flag] + con[i] + " </s>").lower()
            flag = (flag + 1) % 2
        con[0] = f"<topic> {topic} </s>"

        tknowledge = []
        # Topic of the top-ranked English knowledge candidate (pool index 0 = label).
        select_topic = knowledges[0][pool_ids[0][id][0]].split("\t")[0]
        if strict_align:
            if select_topic not in aligns:  # drop samples whose selected topic has no alignment entry (incl. English "no passage used")
                continue
            if knowledge_languages[1] == "zh":
                # Chinese has no whitespace tokenisation; space-join characters before F1.
                f1 = f1_score(' '.join(knowledges[1][pool_ids[1][id][0]]), [' '.join(response)])
            else:
                f1 = f1_score(knowledges[1][pool_ids[1][id][0]], [response])
            if f1 < align_threshold:  # drop aligned-but-dissimilar samples (incl. second-language "no passage used")
                continue
            # A larger threshold (e.g. 0.35) suits in-sample contrastive learning (hard negatives);
            # a smaller threshold (e.g. 0.25) suits in-batch contrastive learning (softer negatives).
            # strict_align=True guarantees the two languages' label knowledge is aligned within a
            # sample, so the semantic space can be optimised via contrastive learning.

        for pids, ks, kl in zip(pool_ids, knowledges, knowledge_languages):
            pool_id = pids[id]
            ck = [f"{mbart_lang_to_lang[kl]} <topic> " + ks[pid].split("\t")[0].lower() + " </s> " + ks[pid].split("\t")[1].lower() for pid in pool_id]
            tknowledge.append(ck)
        if random_delete and random_binary(delete_threshold):  # presumably True with probability delete_threshold — drops the English label knowledge
            tknowledge[0].pop(0)
        else:
            keep += 1
        # Interleave the two languages' knowledge lists, starting from a random language.
        knowledge = []
        m, n = 0, 0
        # NOTE(review): the [:-1] slices mean the exhausted list's final entry is never
        # appended below (only the other list's tail is) — confirm this is intentional.
        lm, ln = len(tknowledge[0][:-1]), len(tknowledge[1][:-1])
        number = random.randint(0, 1)
        while m < lm and n < ln:
            knowledge.append(tknowledge[number][m])
            number = (number + 1) % 2
            knowledge.append(tknowledge[number][n])
            number = (number + 1) % 2
            m += 1
            n += 1
        if m == lm:
            knowledge.extend(tknowledge[1][n:])
        else:
            knowledge.extend(tknowledge[0][m:])
        if len(knowledge) < 2:  # drop samples whose merged knowledge pool has fewer than 2 entries
            continue
        contexts.append(con[:-1])
        responses.append(response.lower())
        knowledgess.append(knowledge)
    print(f"{knowledge_languages} total examples {len(contexts)}")
    return contexts, knowledgess, responses





def train_generate(language):
    """Warm up the mBART response generator with bilingual knowledge.

    Loads dialogues whose knowledge pools interleave English and ``language``
    snippets (see load_data_fromtwo), then fine-tunes
    MBartForConditionalGeneration with teacher-forced cross-entropy.
    A checkpoint is saved after every epoch to
    ``ckpt/mbart-largecc25-generate-warmup-{language}/{epoch}.pt``.
    """
    accelerator = Accelerator()
    os.environ['TOKENIZERS_PARALLELISM'] = 'false'

    epochs = 10
    batch_size = 6
    lr = 5e-5
    con_num = 3
    neg_num = 6  # pool_size: neg_num+2

    # gen_model_name = r"D:\0MData\pretrain models\mbart-large-cc25"
    gen_model_name = r"../pretrain_models/mbart-large-cc25"
    gen_ckpt_name = f"mbart-largecc25-generate-warmup-{language}"
    gen_tokenizer = MBartTokenizer.from_pretrained(gen_model_name)
    # NOTE(review): overrides the tokenizer's language-code map with the
    # project-specific ids — confirm it matches the resized vocabulary.
    gen_tokenizer.lang_code_to_id = mbart_lang_to_id

    print(f"epochs {epochs}, batch_size {batch_size}, mode train_gen1")
    print(f"gen model name '{gen_model_name}', gen ckpt name '{gen_ckpt_name}', dialogue language '{language}'")

    gen_config = AutoConfig.from_pretrained(gen_model_name)
    gen_model = MBartForConditionalGeneration.from_pretrained(gen_model_name, config=gen_config)
    gen_tokenizer.add_tokens(['<topic>', '<wizard>', '<apprentice>', '<knowledge>', '<none>'])
    gen_model.resize_token_embeddings(len(gen_tokenizer))

    model = gen_model

    data_path = f"./wow/train/"
    # The generator could be warmed up with native-language knowledge, English
    # knowledge, or a mixed bilingual pool; only the mixed variant is enabled below.
    # context1, knowledge1, response1 = load_data_fromone(f"{data_path}/{language}.txt", f"{data_path}/pool.pkl",
    #                                                     f"{data_path}/0.txt", "en")
    # context2, knowledge2, response2 = load_data_fromone(f"{data_path}/{language}.txt", f"{data_path}/pool_{language}.pkl",
    #                                                     f"{data_path}/0_{language}.txt", language)
    context3, knowledge3, response3 = load_data_fromtwo(f"{data_path}/{language}.txt",
                                                        [data_path + "pool.pkl", data_path + f"pool_{language}.pkl"],
                                                        [data_path + "0.txt", data_path + f"0_{language}.txt"],
                                                        ["en", language], f"{data_path}/en2{language}.json",
                                                        strict_align=False, random_delete=False)
    # context = context1 + context2 + context3
    # knowledge = knowledge1 + knowledge2 + knowledge3
    # response = response1 + response2 + response3
    context = context3
    knowledge = knowledge3
    response = response3
    dataset = MKGCGenerateData(context, knowledge, response, gen_tokenizer, generate_mode=2, context_len=256,
                               response_len=128, lang_code=mbart_lang_to_id[language], con_num=con_num,
                               neg_num=neg_num, pad_none=True, shuffle=True)
    data_loader = torch.utils.data.DataLoader(
        dataset, collate_fn=dataset.collate_fn, batch_size=batch_size, shuffle=True, num_workers=8)

    optimizer = AdamW(model.parameters(), lr=lr)
    model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
    # Linear warmup over one epoch's worth of steps, then linear decay.
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=len(data_loader), num_training_steps=epochs * len(data_loader))
    scheduler = accelerator.prepare(scheduler)
    # Train the generator with teacher forcing.
    for epoch in range(epochs):
        accelerator.wait_for_everyone()
        accelerator.print(f'train epoch={epoch}')
        tk0 = tqdm(data_loader, total=len(data_loader))
        losses = []
        for batch in tk0:
            predict = model(input_ids=batch["context"], attention_mask=batch["context_mask"],
                            decoder_input_ids=batch["response"], decoder_attention_mask=batch["response_mask"],
                            return_dict=True)["logits"]

            b_s, s_l, v_s = predict.size()  # [batch_size, seq_len, vocab_size]: <lang_code> ... </s> <pad> ...
            # Shift by one: predict token t+1 from positions <= t.
            predict = predict[:, :-1, :].reshape(-1, v_s)  # [batch_size * (seq_len - 1), vocab_size], skip the last
            gt = batch["response"][:, 1:].reshape(-1)  # [batch_size * (seq_len - 1)], skip lang_token

            loss = F.cross_entropy(predict, gt, ignore_index=gen_tokenizer.pad_token_id)

            accelerator.backward(loss)
            accelerator.clip_grad_norm_(model.parameters(), 1.)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
            losses.append(loss.item())
            tk0.set_postfix(loss=sum(losses) / len(losses))

        os.makedirs(f'ckpt/{gen_ckpt_name}', exist_ok=True)
        if accelerator.is_local_main_process:
            accelerator.save(accelerator.unwrap_model(model).state_dict(), f'ckpt/{gen_ckpt_name}/{epoch}.pt')


def train_rank1(language):
    """Warm up the knowledge ranker with monolingual (English) knowledge.

    Fine-tunes an InfoXLM sequence-classification head to score each
    (context, knowledge) pair; candidates in a pool are ranked with a
    softmax cross-entropy whose target is index 0 (the label knowledge,
    which the data loader places first). Checkpoints are saved per epoch
    to ``ckpt/infoxlm-base-rank-warmup1-{language}/``.
    """
    accelerator = Accelerator()
    os.environ['TOKENIZERS_PARALLELISM'] = 'false'

    epochs = 5
    batch_size = 8
    lr = 2e-5
    neg_num = 4  # pool_size: neg_num+1

    model_name = r"../pretrain_models/infoxlm-base/"
    ckpt_name = f"infoxlm-base-rank-warmup1-{language}"
    tokenizer = XLMRobertaTokenizer.from_pretrained(model_name)

    print(f"epochs {epochs}, batch_size {batch_size}, learning_rate {lr}, mode train")
    print(f"model name '{model_name}', ckpt name '{ckpt_name}', dialogue language '{language}'")

    data_path = "./wow/train/"
    # English knowledge only. TODO: add weighting for non-English pseudo-labels.
    context, knowledge, response = load_data_fromone(f"{data_path}/{language}.txt", f"{data_path}/pool.pkl",
                                                        f"{data_path}/0.txt", "en")
    dataset = MKGCRankData(context, knowledge, response, tokenizer, rank_mode=1, context_len=256, response_len=128,
                           neg_num=neg_num, pad_none=True)
    data_loader = torch.utils.data.DataLoader(
        dataset, collate_fn=dataset.collate_fn, batch_size=batch_size, shuffle=True, num_workers=8)

    config = AutoConfig.from_pretrained(model_name)
    config.num_labels = 1  # single relevance score per (context, knowledge) pair
    model = AutoModelForSequenceClassification.from_pretrained(model_name, config=config)
    tokenizer.add_tokens(['<topic>', '<wizard>', '<apprentice>', '<knowledge>', '<none>'])
    model.resize_token_embeddings(len(tokenizer))

    optimizer = AdamW(model.parameters(), lr=lr)

    model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=len(data_loader), num_training_steps=epochs * len(data_loader))
    scheduler = accelerator.prepare(scheduler)

    for epoch in range(epochs):
        accelerator.wait_for_everyone()
        accelerator.print(f'train epoch={epoch}')
        tk0 = tqdm(data_loader, total=len(data_loader))
        losses = []
        for batch in tk0:
            b_s, p_s, _ = batch["knowledge"].size()  # p_s = neg_num + 1
            # [batch_size * pool_size, context_len + knowledge_len]
            input_ids = torch.cat([batch["context"].unsqueeze(1).expand(-1, p_s, -1), batch["knowledge"]], dim=-1).reshape(b_s*p_s, -1)
            # Mask padding using the tokenizer's pad id instead of a hard-coded 1.
            attention_mask = input_ids.ne(tokenizer.pad_token_id).long()
            output = model(input_ids=input_ids, attention_mask=attention_mask, return_dict=True)
            logits = output.logits.view(-1, p_s)  # [batch_size * pool_size, 1] --> [batch_size, pool_size]
            # Target is pool index 0 (the label knowledge). Build the tensor on the
            # logits' device rather than hard-coding .cuda(), so CPU runs and
            # accelerate's device placement both work.
            target = torch.zeros(logits.size(0), dtype=torch.long, device=logits.device)
            loss = F.cross_entropy(logits, target)

            accelerator.backward(loss)
            accelerator.clip_grad_norm_(model.parameters(), 1.)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
            losses.append(loss.item())
            tk0.set_postfix(loss=sum(losses) / len(losses))

        os.makedirs(f'ckpt/{ckpt_name}', exist_ok=True)
        if accelerator.is_local_main_process:
            accelerator.save(accelerator.unwrap_model(model).state_dict(), f'ckpt/{ckpt_name}/{epoch}.pt')


def train_rank2(language):
    """Warm up the ranker with strictly-aligned bilingual knowledge.

    Resumes from the latest train_rank1 checkpoint, then trains with three
    losses: ranking cross-entropy over both aligned label positions (0 and 1),
    an in-batch contrastive loss over the two label knowledge embeddings, and
    an in-sample contrastive loss over each pool. Checkpoints go to
    ``ckpt/infoxlm-base-rank-warmup2-{language}/``.
    """
    accelerator = Accelerator()
    os.environ['TOKENIZERS_PARALLELISM'] = 'false'

    epochs = 5
    batch_size = 6
    lr = 2e-5
    neg_num = 4  # pool_size: neg_num+2

    # model_name = r"D:\0MData\pretrain models\infoxlm-base"
    model_name = r"../pretrain_models/infoxlm-base/"
    pre_ckpt_name = f"infoxlm-base-rank-warmup1-{language}"
    ckpt_name = f"infoxlm-base-rank-warmup2-{language}"
    tokenizer = XLMRobertaTokenizer.from_pretrained(model_name)

    print(f"epochs {epochs}, batch_size {batch_size}, learning_rate {lr}, mode train")
    print(f"model name '{model_name}', pre ckpt name {pre_ckpt_name}, ckpt name '{ckpt_name}', dialogue language '{language}'")

    data_path = "./wow/train/"
    context, knowledge, response = load_data_fromtwo(f"{data_path}/{language}.txt",
                                                     [f"{data_path}/pool.pkl", f"{data_path}/pool_{language}.pkl"],
                                                     [f"{data_path}/0.txt", f"{data_path}/0_{language}.txt"],
                                                     ["en", language], f"{data_path}/en2{language}.json",
                                                     strict_align=True, align_threshold=0.35, random_delete=False)
    # 0.35 yields hard negatives for in-sample contrast; 0.25 would suit in-batch contrast.

    dataset = MKGCRankData(context, knowledge, response, tokenizer, rank_mode=2, context_len=256, response_len=128,
                           neg_num=neg_num, pad_none=True)
    data_loader = torch.utils.data.DataLoader(
        dataset, collate_fn=dataset.collate_fn, batch_size=batch_size, shuffle=True, num_workers=8)

    config = AutoConfig.from_pretrained(model_name)
    config.num_labels = 1  # single relevance score per (context, knowledge) pair
    model = AutoModelForSequenceClassification.from_pretrained(model_name, config=config)
    tokenizer.add_tokens(['<topic>', '<wizard>', '<apprentice>', '<knowledge>', '<none>'])
    model.resize_token_embeddings(len(tokenizer))

    # Resume from the newest rank1 checkpoint; load on CPU so the checkpoint's
    # original device doesn't matter (accelerate moves the model afterwards).
    maxindex = max(int(file.split('.')[0]) for file in os.listdir(f"ckpt/{pre_ckpt_name}") if file.endswith(".pt"))
    model.load_state_dict(torch.load(f"ckpt/{pre_ckpt_name}/{maxindex}.pt", map_location="cpu"))
    print(f"Load rank1 model_state_dict from 'ckpt/{pre_ckpt_name}/{maxindex}.pt'")

    optimizer = AdamW(model.parameters(), lr=lr)

    model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=len(data_loader), num_training_steps=epochs * len(data_loader))
    scheduler = accelerator.prepare(scheduler)

    ls = nn.LogSoftmax(dim=-1)
    for epoch in range(epochs):
        accelerator.wait_for_everyone()
        accelerator.print(f'train epoch={epoch}')
        tk0 = tqdm(data_loader, total=len(data_loader))
        losses = []
        lossks, lossc1s, lossc2s = [], [], []
        for batch in tk0:
            b_s, c_l = batch["context"].size()
            b_s, p_s, _ = batch["knowledge"].size()  # p_s = neg_num + 2
            input_ids = torch.cat([batch["context"].unsqueeze(1).expand(-1, p_s, -1), batch["knowledge"]], dim=-1).reshape(b_s*p_s, -1)
            # Mask padding via the tokenizer's pad id instead of a hard-coded 1.
            attention_mask = input_ids.ne(tokenizer.pad_token_id).long()
            output = model(input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True, return_dict=True)
            # Embedding at position c_l (the first knowledge token, right after the
            # context) serves as the knowledge representation.  [b_s * p_s, h_s]
            kcls = output.hidden_states[-1][:, c_l]

            logits = output.logits.view(-1, p_s)  # [b_s * p_s, 1] --> [b_s, p_s]
            # Both pool positions 0 and 1 hold (aligned) label knowledge; maximise
            # their log-probabilities. Build the index on the logits' device rather
            # than hard-coding .cuda() so CPU / accelerate-placed runs work too.
            label = torch.cat([torch.zeros(b_s, 1, dtype=torch.long),
                               torch.ones(b_s, 1, dtype=torch.long)], dim=-1).to(logits.device)
            lossk = -torch.gather(ls(logits), dim=1, index=label).mean()

            # In-batch contrastive loss: the two label embeddings of a sample are
            # positives for each other; other samples' embeddings are negatives.
            # TODO: sample two random entries per sample instead of the first two.
            CL1 = ConLoss1(b_s)
            lossc1 = CL1(kcls[range(0, b_s*p_s, p_s)], kcls[range(1, b_s*p_s, p_s)])
            # In-sample contrastive loss: within each pool, the first two entries
            # are positives, the rest negatives.
            CL2 = ConLoss2(b_s)
            lossc2 = CL2(kcls.reshape(b_s, p_s, -1))

            loss = 0.6*lossk + 0.2*lossc1 + 0.2*lossc2

            accelerator.backward(loss)
            accelerator.clip_grad_norm_(model.parameters(), 1.)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()

            losses.append(loss.item())
            lossks.append(0.6*lossk.item())
            lossc1s.append(0.2*lossc1.item())
            lossc2s.append(0.2*lossc2.item())
            tk0.set_postfix(loss=sum(losses)/len(losses), lossk=sum(lossks)/len(lossks),
                            lossc1=sum(lossc1s)/len(lossc1s), lossc2=sum(lossc2s)/len(lossc2s))

        os.makedirs(f'ckpt/{ckpt_name}', exist_ok=True)
        if accelerator.is_local_main_process:
            accelerator.save(accelerator.unwrap_model(model).state_dict(), f'ckpt/{ckpt_name}/{epoch}.pt')


def train_rank(language):
    """Warm up the ranker jointly on monolingual and aligned bilingual data.

    Each step consumes one batch from an English-only loader (plain ranking
    loss) and one from a strictly-aligned bilingual loader (ranking +
    in-batch/in-sample contrastive losses), mixing the two objectives 50/50.
    Iteration stops at the shorter loader each epoch. Checkpoints go to
    ``ckpt/infoxlm-base-rank-warmup-{language}/``.
    """
    accelerator = Accelerator()
    os.environ['TOKENIZERS_PARALLELISM'] = 'false'

    epochs = 5
    batch_size1 = 6
    batch_size2 = 2
    lr = 2e-5
    neg_num = 4  # pool_size: neg_num+1

    # model_name = r"D:\0MData\pretrain models\infoxlm-base"
    model_name = r"../pretrain_models/infoxlm-base/"
    ckpt_name = f"infoxlm-base-rank-warmup-{language}"
    tokenizer = XLMRobertaTokenizer.from_pretrained(model_name)

    print(f"epochs {epochs}, batch_size1 {batch_size1}, batch_size2 {batch_size2}, learning_rate {lr}, mode train")
    print(f"model name '{model_name}', ckpt name '{ckpt_name}', dialogue language '{language}'")

    data_path = "./wow/train/"
    context1, knowledge1, response1 = load_data_fromone(f"{data_path}/{language}.txt", f"{data_path}/pool.pkl",
                                                        f"{data_path}/0.txt", "en")
    context2, knowledge2, response2 = load_data_fromtwo(f"{data_path}/{language}.txt",
                                                     [f"{data_path}/pool.pkl", f"{data_path}/pool_{language}.pkl"],
                                                     [f"{data_path}/0.txt", f"{data_path}/0_{language}.txt"],
                                                     ["en", language], f"{data_path}/en2{language}.json",
                                                     strict_align=True, align_threshold=0.35, random_delete=False)
    # 0.35 yields hard negatives for in-sample contrast; 0.25 would suit in-batch contrast.
    dataset1 = MKGCRankData(context1, knowledge1, response1, tokenizer, rank_mode=1, context_len=256, response_len=128,
                           neg_num=neg_num, pad_none=True)
    data_loader1 = torch.utils.data.DataLoader(
        dataset1, collate_fn=dataset1.collate_fn, batch_size=batch_size1, shuffle=True, num_workers=8)
    dataset2 = MKGCRankData(context2, knowledge2, response2, tokenizer, rank_mode=2, context_len=256, response_len=128,
                           neg_num=neg_num-1, pad_none=True)  # neg_num-1 keeps mode=1 and mode=2 pool sizes equal
    data_loader2 = torch.utils.data.DataLoader(
        dataset2, collate_fn=dataset2.collate_fn, batch_size=batch_size2, shuffle=True, num_workers=8)

    config = AutoConfig.from_pretrained(model_name)
    config.num_labels = 1  # single relevance score per (context, knowledge) pair
    model = AutoModelForSequenceClassification.from_pretrained(model_name, config=config)
    tokenizer.add_tokens(['<topic>', '<wizard>', '<apprentice>', '<knowledge>', '<none>'])
    model.resize_token_embeddings(len(tokenizer))

    optimizer = AdamW(model.parameters(), lr=lr)

    model, optimizer, data_loader1, data_loader2 = accelerator.prepare(model, optimizer, data_loader1, data_loader2)
    minsteps = min(len(data_loader1), len(data_loader2))  # zip() truncates to the shorter loader
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=minsteps, num_training_steps=epochs * minsteps)
    scheduler = accelerator.prepare(scheduler)

    ls = nn.LogSoftmax(dim=-1)
    for epoch in range(epochs):
        accelerator.wait_for_everyone()
        accelerator.print(f'train epoch={epoch}')
        tk0 = tqdm(zip(data_loader1, data_loader2), total=minsteps)
        losses = []
        loss1s, lossks, lossc1s, lossc2s = [], [], [], []
        p_s = neg_num + 1
        for batch1, batch2 in tk0:
            # --- Monolingual batch: plain ranking loss, target = pool index 0. ---
            b_s1, _ = batch1["context"].size()
            # [batch_size1 * pool_size, context_len1 + knowledge_len1]
            input_ids = torch.cat([batch1["context"].unsqueeze(1).expand(-1, p_s, -1), batch1["knowledge"]],
                                  dim=-1).reshape(b_s1 * p_s, -1)
            # Mask padding via the tokenizer's pad id instead of a hard-coded 1.
            output = model(input_ids=input_ids, attention_mask=input_ids.ne(tokenizer.pad_token_id).long(),
                           return_dict=True)
            logits = output.logits.view(-1, p_s)  # [b_s1 * p_s, 1] --> [b_s1, p_s]
            # Build targets on the logits' device rather than hard-coding .cuda(),
            # so CPU runs and accelerate's device placement both work.
            loss1 = F.cross_entropy(logits, torch.zeros(logits.size(0), dtype=torch.long, device=logits.device))

            # --- Bilingual batch: ranking over both label positions + contrastive. ---
            b_s2, c_l2 = batch2["context"].size()
            # [batch_size2 * pool_size, context_len2 + knowledge_len2]
            input_ids = torch.cat([batch2["context"].unsqueeze(1).expand(-1, p_s, -1), batch2["knowledge"]],
                                  dim=-1).reshape(b_s2 * p_s, -1)
            output = model(input_ids=input_ids, attention_mask=input_ids.ne(tokenizer.pad_token_id).long(),
                           output_hidden_states=True, return_dict=True)
            # Embedding at position c_l2 (first knowledge token) as the knowledge
            # representation.  [b_s2 * p_s, h_s]
            kcls = output.hidden_states[-1][:, c_l2]

            logits = output.logits.view(-1, p_s)  # [b_s2 * p_s, 1] --> [b_s2, p_s]
            # Pool positions 0 and 1 both hold (aligned) label knowledge.
            label = torch.cat([torch.zeros(b_s2, 1, dtype=torch.long),
                               torch.ones(b_s2, 1, dtype=torch.long)], dim=-1).to(logits.device)  # [b_s2, 2]
            lossk = -torch.gather(ls(logits), dim=1, index=label).mean()

            # In-batch contrastive loss: a sample's two label embeddings are
            # positives for each other, other samples' embeddings are negatives.
            # TODO: sample two random entries per sample instead of the first two.
            CL1 = ConLoss1(b_s2)
            lossc1 = CL1(kcls[range(0, b_s2 * p_s, p_s)],
                         kcls[range(1, b_s2 * p_s, p_s)])
            # In-sample contrastive loss: first two pool entries are positives,
            # the rest negatives, within each sample.
            CL2 = ConLoss2(b_s2)
            lossc2 = CL2(kcls.reshape(b_s2, p_s, -1))

            loss = 0.5 * loss1 + 0.5 * (0.6 * lossk + 0.2 * lossc1 + 0.2 * lossc2)

            accelerator.backward(loss)
            accelerator.clip_grad_norm_(model.parameters(), 1.)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()

            losses.append(loss.item())
            loss1s.append(0.5 * loss1.item())
            lossks.append(0.3 * lossk.item())
            lossc1s.append(0.1 * lossc1.item())
            lossc2s.append(0.1 * lossc2.item())
            tk0.set_postfix(loss=sum(losses) / len(losses), loss1=sum(loss1s) / len(loss1s), lossk=sum(lossks) / len(lossks),
                            lossc1=sum(lossc1s) / len(lossc1s), lossc2=sum(lossc2s) / len(lossc2s))

        os.makedirs(f'ckpt/{ckpt_name}', exist_ok=True)
        if accelerator.is_local_main_process:
            accelerator.save(accelerator.unwrap_model(model).state_dict(), f'ckpt/{ckpt_name}/{epoch}.pt')


def train_reinforcement(language):
    """Further train the ranker with a REINFORCE-style objective on bilingual data.

    Loads the warmed-up ranker and generator into a JointModel; the frozen
    generator scores the top-ranked knowledge via response perplexity, and the
    reward (running-mean PPL baseline minus the sample's PPL) weights the
    ranker's log-probabilities, scaled by the generator's attention over the
    selected knowledge. Only the rank model is updated. Checkpoints go to
    ``ckpt/infoxlm-base-rank-reinforcement-{language}/``.
    """
    accelerator = Accelerator()
    os.environ['TOKENIZERS_PARALLELISM'] = 'false'

    epochs = 10
    batch_size = 3
    lr = 2e-5
    neg_num = 6  # pool_size: neg_num+2
    con_num = 3  # number of knowledge entries picked by the ranker per sample

    # rank_model_name = r"D:\0MData\pretrain models\infoxlm-base"
    rank_model_name = r"../pretrain_models/infoxlm-base/"
    pre_rank_ckpt_name = f"infoxlm-base-rank-warmup-{language}"
    rank_tokenizer = XLMRobertaTokenizer.from_pretrained(rank_model_name)

    # gen_model_name = r"D:\0MData\pretrain models\mbart-large-cc25"
    gen_model_name = r"../pretrain_models/mbart-large-cc25"
    pre_gen_ckpt_name = f"mbart-largecc25-generate-warmup-{language}"
    gen_tokenizer = MBartTokenizer.from_pretrained(gen_model_name)
    gen_tokenizer.lang_code_to_id = mbart_lang_to_id

    ckpt_name = f"infoxlm-base-rank-reinforcement-{language}"

    print(f"epoches {epochs}, batch_size {batch_size}, mode train")
    print(f"rank model name '{rank_model_name}', pre rank ckpt name '{pre_rank_ckpt_name}', gen model name '{gen_model_name}', "
          f"pre gen ckpt name '{pre_gen_ckpt_name}', dialogue language '{language}'")

    data_path = "./wow/train/"
    context, knowledge, response = load_data_fromtwo(f"{data_path}/{language}.txt",
                                                     [f"{data_path}/pool.pkl", f"{data_path}/pool_{language}.pkl"],
                                                     [f"{data_path}/0.txt", f"{data_path}/0_{language}.txt"],
                                                     ["en", language], f"{data_path}/en2{language}.json",
                                                     strict_align=False, random_delete=True, delete_threshold=0.7)

    dataset = MKGCData(context, knowledge, response, rank_tokenizer, gen_tokenizer, mode=2, context_len=256,
                       response_len=128, neg_num=neg_num, pad_none=True)
    data_loader = torch.utils.data.DataLoader(
        dataset, collate_fn=dataset.collate_fn, batch_size=batch_size, shuffle=True, num_workers=8)

    rank_config = AutoConfig.from_pretrained(rank_model_name)
    rank_config.num_labels = 1  # single relevance score per (context, knowledge) pair
    rank_model = AutoModelForSequenceClassification.from_pretrained(rank_model_name, config=rank_config)
    rank_tokenizer.add_tokens(['<topic>', '<wizard>', '<apprentice>', '<knowledge>', '<none>'])
    rank_model.resize_token_embeddings(len(rank_tokenizer))

    gen_config = AutoConfig.from_pretrained(gen_model_name)
    gen_model = MBartForConditionalGeneration.from_pretrained(gen_model_name, config=gen_config)
    gen_tokenizer.add_tokens(['<topic>', '<wizard>', '<apprentice>', '<knowledge>', '<none>'])
    gen_model.resize_token_embeddings(len(gen_tokenizer))

    # Resume both sub-models from their newest warm-up checkpoints.
    model = JointModel(rank_model, gen_model)
    maxindex = max(int(file.split('.')[0]) for file in os.listdir(f"ckpt/{pre_rank_ckpt_name}") if file.endswith(".pt"))
    model.load_rank(f"ckpt/{pre_rank_ckpt_name}/{maxindex}.pt")
    maxindex = max(int(file.split('.')[0]) for file in os.listdir(f"ckpt/{pre_gen_ckpt_name}") if file.endswith(".pt"))
    model.load_gen(f"ckpt/{pre_gen_ckpt_name}/{maxindex}.pt")

    # The generator only provides the reward signal; only the ranker is trained.
    model.gen_model.eval()
    model.rank_model.train()

    optimizer = AdamW(model.rank_model.parameters(), lr=lr)
    model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=len(data_loader), num_training_steps=epochs * len(data_loader))
    scheduler = accelerator.prepare(scheduler)

    ls = nn.LogSoftmax(dim=-1)
    for epoch in range(epochs):
        accelerator.wait_for_everyone()
        accelerator.print(f'train epoch={epoch}')
        tk0 = tqdm(data_loader, total=len(data_loader))
        losses = []
        ppls = []
        for batch in tk0:
            b_s, _ = batch["rank_context"].size()
            p_s = neg_num + 2
            # input_ids [batch_size * pool_size, con_len + know_len]
            input_ids = torch.cat([batch["rank_context"].unsqueeze(1).expand(-1, p_s, -1), batch["rank_knowledge"]], dim=-1).reshape(b_s*p_s, -1)
            # Mask padding via the tokenizer's pad id instead of a hard-coded 1.
            attention_masks = input_ids.ne(rank_tokenizer.pad_token_id).long()
            # logits [batch_size, pool_size]; rank [batch_size, con_num]
            rank_output = model.rank(rank_inputs=input_ids, rank_masks=attention_masks, size=(b_s, p_s), k=con_num)
            # Negative log-probability of each selected knowledge  [batch_size, con_num]
            loss_origin = -torch.gather(ls(rank_output["logits"]), dim=1, index=rank_output["rank"])
            with torch.no_grad():
                # logits [batch_size, seq_len, vocab_size]; max/mean_attentions [batch_size, con_num]
                output = model.decode(context=batch["generate_context"],
                                      knowledges=batch["generate_knowledge"].reshape(b_s, p_s, -1),
                                      rank_index=rank_output["rank"], response=batch["response"])
            predict = output["logits"]
            attentions = output["mean_attentions"]
            _, seq_len, vocab_size = predict.size()
            # Shift by one: predict token t+1 from positions <= t.
            gt = batch["response"][:, 1:].reshape(-1)
            predict = predict[:, :-1, :].reshape(-1, vocab_size)
            # Per-sample summed token NLL  [batch_size]
            loss_gen = F.cross_entropy(predict, gt, ignore_index=gen_tokenizer.pad_token_id, reduction="none")
            loss_gen = loss_gen.reshape(-1, seq_len-1).sum(dim=-1)
            # Non-pad token counts  [batch_size]
            tk_num = batch["response"].ne(gen_tokenizer.pad_token_id).long().sum(dim=-1)
            ppl = torch.exp(loss_gen/tk_num)
            torch.clamp_(ppl, min=0., max=10.)  # truncate ppl to [0., 10.]
            ppls.append(ppl.mean().item())

            # Reward = running-mean PPL baseline minus this sample's PPL; new_tensor
            # creates the baseline on ppl's device/dtype instead of hard-coded .cuda(),
            # so CPU / accelerate-placed runs also work.
            reward = ppl.new_tensor(sum(ppls)/len(ppls)) - ppl
            torch.clamp_(reward, min=-2., max=2.)  # truncate reward to [-2., 2.]
            # sum(): attention-weighted ranking NLL; mean(): average over samples.
            loss = torch.mul(reward, torch.mul(attentions, loss_origin).sum(-1)).mean(-1)

            accelerator.backward(loss)
            accelerator.clip_grad_norm_(model.parameters(), 1.)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
            losses.append(loss.item())
            tk0.set_postfix(loss=sum(losses) / len(losses), ppl=ppl.mean().item(), reward=reward.mean().item())

        os.makedirs(f'ckpt/{ckpt_name}', exist_ok=True)
        if accelerator.is_local_main_process:
            accelerator.save(accelerator.unwrap_model(model.rank_model).state_dict(), f'ckpt/{ckpt_name}/{epoch}.pt')


def test_rank(language="zh", mode="warmup"):
    """Evaluate the rank-model checkpoints of one training stage.

    For every epoch checkpoint found under ``ckpt/``, the rank model scores the
    knowledge pool and picks the top ``con_num`` knowledges, a fixed warm-up
    generator decodes a response conditioned on them, and sentence-level F1
    plus the metrics from ``eval_all`` are reported.  Predictions are written
    next to each checkpoint as ``<epoch>.txt``.

    Args:
        language: dialogue language code, e.g. "zh".
        mode: which training stage's rank checkpoints to evaluate.
    """
    assert mode in ["warmup", "warmup1", "warmup2", "reinforcement"]
    epoches = 10
    batch_size = 4
    con_num = 3  # number of top-ranked knowledges fed to the generator

    rank_ckpt_path = "ckpt"
    rank_model_name = r"../pretrain_models/infoxlm-base/"
    rank_ckpt_name = f"infoxlm-base-rank-{mode}-{language}"
    # rank_ckpt_name = f"infoxlm-base-rank-{language}"
    rank_tokenizer = XLMRobertaTokenizer.from_pretrained(rank_model_name)

    gen_ckpt_path = "ckpt"
    gen_model_name = r"../pretrain_models/mbart-large-cc25/"
    gen_ckpt_name = f"mbart-largecc25-generate-warmup-{language}"
    gen_tokenizer = MBartTokenizer.from_pretrained(gen_model_name)
    gen_tokenizer.lang_code_to_id = mbart_lang_to_id

    print(f"epoches {epoches}, batch_size {batch_size}, mode test {mode}")
    print(f"rank ckpt path '{rank_ckpt_path}', rank model name '{rank_model_name}', rank ckpt name '{rank_ckpt_name}', "
          f"gen ckpt path '{gen_ckpt_path}', gen model name '{gen_model_name}', gen ckpt name '{gen_ckpt_name}', "
          f"dialogue language '{language}'")

    data_path = "./wow/seen/"
    # context, knowledge, response = load_data_fromone(f"{data_path}/{language}.txt", f"{data_path}/pool.pkl",
    #                                                  f"{data_path}/0.txt", "en")
    context, knowledge, response = load_data_fromtwo(f"{data_path}/{language}.txt",
                                                     [f"{data_path}/pool.pkl", f"{data_path}/pool_{language}.pkl"],
                                                     [f"{data_path}/0.txt", f"{data_path}/0_{language}.txt"],
                                                     ["en", language], f"{data_path}/en2{language}.json",
                                                     strict_align=False, random_delete=False)
    # NOTE(review): the special tokens below are added to the tokenizers only
    # after the dataset is constructed — confirm MKGCData tokenizes lazily.
    dataset = MKGCData(context, knowledge, response, rank_tokenizer, gen_tokenizer, mode=2, context_len=256, response_len=128,
                        lang_code=mbart_lang_to_id[language], neg_num=128, pad_none=True)

    data_loader = torch.utils.data.DataLoader(
        dataset, collate_fn=dataset.collate_fn, batch_size=batch_size, shuffle=False)

    rank_config = AutoConfig.from_pretrained(rank_model_name)
    rank_config.num_labels = 1  # single relevance score per (context, knowledge) pair
    rank_model = AutoModelForSequenceClassification.from_pretrained(rank_model_name, config=rank_config)
    rank_tokenizer.add_tokens(['<topic>', '<wizard>', '<apprentice>', '<knowledge>', '<none>'])
    rank_model.resize_token_embeddings(len(rank_tokenizer))

    gen_config = AutoConfig.from_pretrained(gen_model_name)
    gen_model = MBartForConditionalGeneration.from_pretrained(gen_model_name, config=gen_config)
    gen_tokenizer.add_tokens(['<topic>', '<wizard>', '<apprentice>', '<knowledge>', '<none>'])
    gen_model.resize_token_embeddings(len(gen_tokenizer))

    model = JointModel(rank_model, gen_model)
    model = model.cuda()

    # The generator is a fixed warm-up checkpoint trained with zh+en knowledge.
    # maxindex = max([int(file.split('.')[0]) for file in os.listdir(f"ckpt/{gen_ckpt_name}") if file.endswith(".pt")])
    model.load_gen(f"{gen_ckpt_path}/{gen_ckpt_name}/2.pt")

    for epoch in range(epoches):
        if not os.path.exists(f'{rank_ckpt_path}/{rank_ckpt_name}/{epoch}.pt'):
            continue
        print(f'Test {rank_ckpt_path}/{rank_ckpt_name}/{epoch}.pt')
        model.load_rank(f"{rank_ckpt_path}/{rank_ckpt_name}/{epoch}.pt")
        tk0 = tqdm(data_loader, total=len(data_loader))
        f1_report = []
        outputs_predict = []
        outputs_true = []
        model.eval()
        with torch.no_grad():
            for batch in tk0:
                batch = {k: v.cuda() for k, v in batch.items()}
                b_s, p_s, _ = batch["rank_knowledge"].size()
                # Pair every pooled knowledge with the (broadcast) context:
                # input_ids [batch_size * pool_size, con_len + know_len]
                input_ids = torch.cat([batch["rank_context"].unsqueeze(1).expand(-1, p_s, -1), batch["rank_knowledge"]],
                                      dim=-1).reshape(b_s * p_s, -1)
                # Fix: use the tokenizer's pad id instead of the hard-coded
                # literal 1 (1 is XLM-R's pad id, so behavior is unchanged).
                attention_masks = input_ids.ne(rank_tokenizer.pad_token_id).long().detach()
                # logits [batch_size, pool_size]
                # rank [batch_size, con_num]
                rank_output = model.rank(rank_inputs=input_ids, rank_masks=attention_masks, size=(b_s, p_s), k=con_num)
                gen_output = model.generate(batch["generate_context"], batch["generate_knowledge"],
                                            rank_output["rank"], mbart_lang_to_id[language])

                predict_sent = gen_tokenizer.batch_decode(gen_output["predict"], skip_special_tokens=True)
                label_sent = gen_tokenizer.batch_decode(batch["response"], skip_special_tokens=True)

                outputs_predict.extend(predict_sent)
                outputs_true.extend(label_sent)

                # Chinese is scored at character level, hence the ' '.join splits.
                if language == "zh":
                    f1 = [f1_score(' '.join(pred), [' '.join(label)]) for pred, label in zip(predict_sent, label_sent)]
                else:
                    f1 = [f1_score(pred, [label]) for pred, label in zip(predict_sent, label_sent)]
                f1_report.extend(f1)
                tk0.set_postfix(f1=sum(f1_report) / len(f1_report))

        if language == "zh":
            outputs_true = [' '.join(ot) for ot in outputs_true]
            outputs_predict = [' '.join(op) for op in outputs_predict]
        eval_dict = eval_all(outputs_predict, outputs_true)
        print(eval_dict)
        write_file(outputs_predict, f'{rank_ckpt_path}/{rank_ckpt_name}/{epoch}.txt')


def test_generate(language="zh", mode="seen"):
    """Evaluate the warm-up generation checkpoints without knowledge ranking.

    For every saved epoch checkpoint of the warm-up mBART generator, decodes
    responses with beam search, reports sentence-level F1 plus ``eval_all``
    metrics, and dumps the predictions beside each checkpoint as
    ``<epoch>.txt``.

    Args:
        language: dialogue language code, e.g. "zh".
        mode: evaluation split, "seen" or "unseen".
    """
    assert mode in ["seen", "unseen"]
    num_epochs = 10
    batch_size = 32
    con_num = 3
    neg_num = 128

    gen_model_name = r"../pretrain_models/mbart-large-cc25/"
    gen_ckpt_name = f"mbart-largecc25-generate-warmup-{language}"
    gen_tokenizer = MBartTokenizer.from_pretrained(gen_model_name)
    gen_tokenizer.lang_code_to_id = mbart_lang_to_id

    print(f"epoches {num_epochs}, batch_size {batch_size}, mode test generate warmup {mode}")
    print(f"gen model name '{gen_model_name}', gen ckpt name '{gen_ckpt_name}', dialogue language '{language}'")

    data_path = f"./wow/{mode}/"
    context, knowledge, response = load_data_fromtwo(
        f"{data_path}/{language}.txt",
        [f"{data_path}/pool.pkl", f"{data_path}/pool_{language}.pkl"],
        [f"{data_path}/0.txt", f"{data_path}/0_{language}.txt"],
        ["en", language], f"{data_path}/en2{language}.json",
        strict_align=False, random_delete=False)

    dataset = MKGCGenerateData(context, knowledge, response, gen_tokenizer, generate_mode=2, context_len=256,
                               response_len=128, lang_code=mbart_lang_to_id[language], con_num=con_num,
                               neg_num=neg_num, pad_none=True, shuffle=False)
    data_loader = torch.utils.data.DataLoader(
        dataset, collate_fn=dataset.collate_fn, batch_size=batch_size, shuffle=False)

    gen_config = AutoConfig.from_pretrained(gen_model_name)
    model = MBartForConditionalGeneration.from_pretrained(gen_model_name, config=gen_config)
    gen_tokenizer.add_tokens(['<topic>', '<wizard>', '<apprentice>', '<knowledge>', '<none>'])
    model.resize_token_embeddings(len(gen_tokenizer))
    model = model.cuda()

    for epoch in range(num_epochs):
        ckpt_file = f'ckpt/{gen_ckpt_name}/{epoch}.pt'
        if not os.path.exists(ckpt_file):
            continue
        print(f'Test {ckpt_file}')
        model.load_state_dict(torch.load(ckpt_file))
        progress = tqdm(data_loader, total=len(data_loader))
        f1_scores = []
        hyps = []
        refs = []
        model.eval()
        with torch.no_grad():
            for batch in progress:
                batch = {key: value.cuda() for key, value in batch.items()}
                generated = model.generate(input_ids=batch["context"], attention_mask=batch["context_mask"],
                                           decoder_start_token_id=mbart_lang_to_id[language], num_beams=3,
                                           max_length=128)

                pred_sents = gen_tokenizer.batch_decode(generated, skip_special_tokens=True)
                gold_sents = gen_tokenizer.batch_decode(batch["response"], skip_special_tokens=True)

                hyps.extend(pred_sents)
                refs.extend(gold_sents)

                # Chinese is scored at character level, hence the ' '.join splits.
                for pred, gold in zip(pred_sents, gold_sents):
                    if language == "zh":
                        f1_scores.append(f1_score(' '.join(pred), [' '.join(gold)]))
                    else:
                        f1_scores.append(f1_score(pred, [gold]))
                progress.set_postfix(f1=sum(f1_scores) / len(f1_scores))

        if language == "zh":
            refs = [' '.join(r) for r in refs]
            hyps = [' '.join(h) for h in hyps]
        eval_dict = eval_all(hyps, refs)
        print(eval_dict)
        write_file(hyps, f'ckpt/{gen_ckpt_name}/{epoch}.txt')


def test_reinforcement(language):
    """Further train the rank model with reinforcement, using bilingual knowledge.

    The frozen warm-up generator decodes the gold response conditioned on the
    selected knowledges; each sample's reward is the running-mean perplexity
    minus its own perplexity (clamped to [-2, 2]), and the rank model is
    updated with an attention-weighted, reward-scaled negative log-likelihood
    loss.  The rank model is checkpointed once per epoch under
    ``ckpt/infoxlm-base-rank-reinforcement-<language>/``.
    """
    accelerator = Accelerator()
    os.environ['TOKENIZERS_PARALLELISM'] = 'false'

    epochs = 10
    batch_size = 3
    lr = 2e-5
    neg_num = 6  # pool_size: neg_num+2
    con_num = 3

    # rank_model_name = r"D:\0MData\pretrain models\infoxlm-base"
    rank_model_name = r"../pretrain_models/infoxlm-base/"
    pre_rank_ckpt_name = f"infoxlm-base-rank-warmup-{language}"
    rank_tokenizer = XLMRobertaTokenizer.from_pretrained(rank_model_name)

    # gen_model_name = r"D:\0MData\pretrain models\mbart-large-cc25"
    gen_model_name = r"../pretrain_models/mbart-large-cc25"
    pre_gen_ckpt_name = f"mbart-largecc25-generate-warmup-{language}"
    gen_tokenizer = MBartTokenizer.from_pretrained(gen_model_name)
    gen_tokenizer.lang_code_to_id = mbart_lang_to_id

    ckpt_name = f"infoxlm-base-rank-reinforcement-{language}"

    print(f"epoches {epochs}, batch_size {batch_size}, mode train")
    print(f"rank model name '{rank_model_name}', pre rank ckpt name '{pre_rank_ckpt_name}', gen model name '{gen_model_name}', "
          f"pre gen ckpt name '{pre_gen_ckpt_name}', dialogue language '{language}'")

    data_path = "./wow/train/"
    context, knowledge, response = load_data_fromtwo(f"{data_path}/{language}.txt",
                                                     [f"{data_path}/pool.pkl", f"{data_path}/pool_{language}.pkl"],
                                                     [f"{data_path}/0.txt", f"{data_path}/0_{language}.txt"],
                                                     ["en", language], f"{data_path}/en2{language}.json",
                                                     strict_align=False, random_delete=False)

    dataset = MKGCData(context, knowledge, response, rank_tokenizer, gen_tokenizer, mode=2, context_len=256,
                       response_len=128, neg_num=neg_num, pad_none=True)
    data_loader = torch.utils.data.DataLoader(
        dataset, collate_fn=dataset.collate_fn, batch_size=batch_size, shuffle=True, num_workers=1)

    rank_config = AutoConfig.from_pretrained(rank_model_name)
    rank_config.num_labels = 1  # single relevance score per (context, knowledge) pair
    rank_model = AutoModelForSequenceClassification.from_pretrained(rank_model_name, config=rank_config)
    rank_tokenizer.add_tokens(['<topic>', '<wizard>', '<apprentice>', '<knowledge>', '<none>'])
    rank_model.resize_token_embeddings(len(rank_tokenizer))

    gen_config = AutoConfig.from_pretrained(gen_model_name)
    gen_model = MBartForConditionalGeneration.from_pretrained(gen_model_name, config=gen_config)
    gen_tokenizer.add_tokens(['<topic>', '<wizard>', '<apprentice>', '<knowledge>', '<none>'])
    gen_model.resize_token_embeddings(len(gen_tokenizer))

    # Both sub-models start from the latest warm-up checkpoints.
    model = JointModel(rank_model, gen_model)
    maxindex = max([int(file.split('.')[0]) for file in os.listdir(f"ckpt/{pre_rank_ckpt_name}") if file.endswith(".pt")])
    model.load_rank(f"ckpt/{pre_rank_ckpt_name}/{maxindex}.pt")
    maxindex = max([int(file.split('.')[0]) for file in os.listdir(f"ckpt/{pre_gen_ckpt_name}") if file.endswith(".pt")])
    model.load_gen(f"ckpt/{pre_gen_ckpt_name}/{maxindex}.pt")

    # Only the rank model is updated; the generator stays frozen in eval mode.
    model.gen_model.eval()
    model.rank_model.train()

    optimizer = AdamW(model.rank_model.parameters(), lr=lr)
    model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=len(data_loader), num_training_steps=epochs * len(data_loader))
    scheduler = accelerator.prepare(scheduler)

    ls = nn.LogSoftmax(dim=-1)
    for epoch in range(epochs):
        accelerator.wait_for_everyone()
        accelerator.print(f'train epoch={epoch}')
        tk0 = tqdm(data_loader, total=len(data_loader))
        losses = []
        ppls = []

        for batch in tk0:
            b_s, _ = batch["rank_context"].size()
            p_s = neg_num + 2
            # Pair every pooled knowledge with the (broadcast) context:
            # input_ids [batch_size * pool_size, con_len + know_len]
            input_ids = torch.cat([batch["rank_context"].unsqueeze(1).expand(-1, p_s, -1), batch["rank_knowledge"]], dim=-1).reshape(b_s*p_s, -1)
            # Use the tokenizer's pad id instead of the hard-coded literal 1
            # (1 is XLM-R's pad id, so behavior is unchanged but explicit).
            attention_masks = input_ids.ne(rank_tokenizer.pad_token_id).long().detach()
            # logits [batch_size, pool_size]
            # rank [batch_size, con_num]
            # NOTE(review): decode() below is fed `index` (always the first
            # con_num knowledges of the pool) while loss_origin gathers at
            # rank_output["rank"]; the main reinforcement train loop uses
            # rank_output["rank"] for both — confirm this mismatch is intended.
            index = torch.arange(0, con_num).unsqueeze(0).expand(b_s, -1).cuda()
            rank_output = model.rank(rank_inputs=input_ids, rank_masks=attention_masks, size=(b_s, p_s), k=con_num)
            # [batch_size, con_num] negative log-softmax at the selected ranks
            loss_origin = -torch.gather(ls(rank_output["logits"]), dim=1, index=rank_output["rank"])
            with torch.no_grad():
                # logits  [batch_size, seq_len, hidden_size]
                # mean/max_attentions  [batch_size, con_num]
                output = model.decode(context=batch["generate_context"],
                                      knowledges=batch["generate_knowledge"].reshape(b_s, p_s, -1),
                                      rank_index=index, response=batch["response"])
            predict = output["logits"]
            mean_attentions = output["mean_attentions"]
            max_attentions = output["max_attentions"]

            _, seq_len, emb_size = predict.size()
            # Shift: predict token t+1 from logits at position t.
            gt = batch["response"][:, 1:].reshape(-1)
            predict = predict[:, :-1, :].reshape(-1, emb_size)
            # [batch_size] per-sample summed token NLL
            loss_gen = F.cross_entropy(predict, gt, ignore_index=gen_tokenizer.pad_token_id, reduction="none")
            loss_gen = loss_gen.reshape(-1, seq_len-1).sum(dim=-1)

            # [batch_size] number of non-pad tokens per response
            tk_num = batch["response"].ne(gen_tokenizer.pad_token_id).long().sum(dim=-1)
            ppl = torch.exp(loss_gen/tk_num)
            torch.clamp_(ppl, min=0., max=10.)  # truncate ppl to [0., 10.]
            ppls.append(ppl.mean().item())

            # Reward: running-mean ppl minus this sample's ppl, so responses
            # with below-average perplexity get a positive reward.
            reward = torch.tensor(sum(ppls)/len(ppls)).cuda() - ppl
            torch.clamp_(reward, min=-2., max=2.)  # truncate reward to [-2., 2.]
            # Inner sum: attention-weighted rank loss over the selected
            # knowledges; outer mean: average over the samples in the batch.
            lossmean = torch.mul(reward, torch.mul(mean_attentions, loss_origin).sum(-1)).mean(-1)
            lossmax = torch.mul(reward, torch.mul(max_attentions, loss_origin).sum(-1)).mean(-1)
            loss = 0.5*lossmean + 0.5*lossmax

            # Fix: backward/clip were commented out, so optimizer.step() ran
            # with no gradients and every saved checkpoint was identical to the
            # warm-up weights; restored as in the main reinforcement loop.
            accelerator.backward(loss)
            accelerator.clip_grad_norm_(model.parameters(), 1.)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
            losses.append(loss.item())
            tk0.set_postfix(loss=sum(losses) / len(losses), ppl=ppl.mean().item(), reward=reward.mean().item())

        os.makedirs(f'ckpt/{ckpt_name}', exist_ok=True)
        if accelerator.is_local_main_process:
            accelerator.save(accelerator.unwrap_model(model.rank_model).state_dict(), f'ckpt/{ckpt_name}/{epoch}.pt')


if __name__ == "__main__":
    # 两阶段训练 rank 和 generate 模型
    # os.environ["CUDA_VISIBLE_DEVICES"] = "2"  # 24G GPU 条件下
    # init_seed(123456)
    # train_rank1("zh")  # 8 23G
    # train_rank2("zh")  # 6 23G
    train_rank("zh")
    # train_generate("zh")  # 8 23G  # 6 23G

    # train_reinforcement("zh")  # 4 23G

    # test_rank("zh", "warmup")
    # test_rank("zh", "warmup1")
    test_rank("zh", "warmup2")
    # test_reinforcement("zh")  # 4 23G
    # test_generate("zh", "seen")
