#! -*- coding: utf-8 -*-
# SimBERT预训练代码，也可用于微调，微调方式用其他方式比如sentence_bert的可能更好
# 官方项目：https://github.com/ZhuiyiTechnology/simbert
import argparse
import os
import json

from datetime import datetime

import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from bert4torch.callbacks import Callback
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, get_pool_emb
from bert4torch.tokenizers import Tokenizer, load_vocab
from torch import nn, optim
from torch.utils.data import DataLoader
from torch4keras.callbacks import EarlyStopping
from torch4keras.snippets import send_email

# Basic settings
maxlen = 80
batch_size = 32

# Loads simbert weights here, then continues pretrain/finetune on your own data.
# To pretrain from scratch you can also load a bert/roberta checkpoint directly.
model_dir = 'roformer_chinese_sim_char_base'
assert os.path.exists(model_dir)
config_path = f'{model_dir}/config.json'
checkpoint_path = f'{model_dir}/pytorch_model.bin'
weight_path = f'{model_dir}/best_model.pt'
dict_path = f'{model_dir}/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
data_path = 'data/data_similarity-simple.json'  # train.csv dev.csv test.csv

# Send an email when training crashes
# Mail configuration
# NOTE(review): SMTP credentials are hardcoded in source — consider moving them
# to environment variables or a config file kept out of version control.
mail_receivers_when_error = '476922504@qq.com'  # recipient address
mail_host_when_error = 'smtp.qq.com'  # SMTP server address
mail_user_when_error = '476922504@qq.com'  # SMTP user name
mail_pwd_when_error = 'fcbrfxkrljxgcagf'  # SMTP password / auth code
mail_sender_when_error = '476922504@qq.com'  # sender address
mail_subject_when_error = '训练异常终止'
mail_subject_when_success = '训练完成通知'

# Other settings
save_ckpt_dir_when_error = 'checkpoints'  # directory for weights saved on error
save_batch_path_when_error = f'{save_ckpt_dir_when_error}/model.pt'  # path for the current batch's weights saved on error

# Print model parameters (debug helper, kept disabled)
# dist = torch.load(os.path.join('WangZeJun_roformer-sim-small-chinese', "pytorch_model.bin"))
# for param in dist:
#     print(param)

# Load and simplify the vocab, then build the tokenizer
token_dict, keep_tokens = load_vocab(
    dict_path=dict_path,
    simplified=True,
    startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]'],
)
tokenizer = Tokenizer(token_dict, do_lower_case=True)


# self.tokenizer = BertTokenizerFast.from_pretrained(model_path)
# self.model = RoFormerModelWithPooler(model_path)

class MyDataset(ListDataset):
    @staticmethod
    def load_data(filename):
        """Load the corpus.

        For .json/.jsonl files: one json object per line, e.g.
        {"text": "懂英语的来！", "synonyms": ["懂英语的来！！！", "懂英语的来"]}
        For anything else the file is read as csv; only rows with label == 1
        are kept, as (sentence1, [sentence2]) synonym pairs.
        """
        D = []
        # BUG FIX: the original `in ('.json')` tested membership in the STRING
        # '.json' (missing tuple comma), so '' (no extension) and fragments like
        # '.js' also matched and were mis-parsed as json. Use a real tuple.
        if os.path.splitext(filename)[1] in ('.json', '.jsonl'):
            with open(filename, encoding='utf-8') as f:
                for l in f:
                    D.append(json.loads(l))
        else:
            df = pd.read_csv(filename, encoding='utf-8')
            # Build one json-like record per positive pair
            for _, row in df.iterrows():
                label = row['label']
                if label == 1:
                    D.append({
                        "text": row['sentence1'],
                        "synonyms": [row['sentence2']],
                        "label": label,
                    })
        return D


def truncate(text):
    """Return the first segment of *text*, cut to at most maxlen - 2 chars."""
    separators = u'\n。！？!?；;，, '
    strip_chars = u'；;，, '
    segments = text_segmentate(text, maxlen - 2, separators, strip_chars)
    return segments[0]


# 数据集的预处理函数，用于批处理
def collate_fn(batch):
    """Batch preprocessing for training.

    For every sample, the original text and its synonyms are shuffled
    together, the first two entries are picked and truncated, and the pair is
    encoded twice — once in each order — as token ids / segment ids. The
    padded tensors are moved to `device` and returned twice: the inputs double
    as the targets, since the loss reconstructs the second sentence and
    contrasts sentence embeddings within the batch.

    Args:
        batch: list of dicts, each with 'text' and 'synonyms' fields.

    Returns:
        ([token_ids, segment_ids], [token_ids, segment_ids]) tensor pairs.
    """
    token_id_seqs, segment_id_seqs = [], []

    for sample in batch:
        # Put the original text among its synonyms and shuffle, then take two.
        candidates = [sample['text']] + sample['synonyms']
        np.random.shuffle(candidates)
        first, second = candidates[:2]
        # Respect the maximum per-sentence length.
        first, second = truncate(first), truncate(second)

        # Encode the pair in both orders to augment the data.
        for a, b in ((first, second), (second, first)):
            ids, segs = tokenizer.encode(a, b, maxlen=maxlen * 2)
            token_id_seqs.append(ids)
            segment_id_seqs.append(segs)

    # Pad to a rectangle and move to the training device.
    token_tensor = torch.tensor(sequence_padding(token_id_seqs), dtype=torch.long, device=device)
    segment_tensor = torch.tensor(sequence_padding(segment_id_seqs), dtype=torch.long, device=device)
    # Targets are identical to the inputs.
    return [token_tensor, segment_tensor], [token_tensor, segment_tensor]


# Training dataloader: shuffled batches built by the pair-encoding collate_fn
train_dataloader = DataLoader(MyDataset(data_path), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)


# 建立加载模型
class Model(BaseModel):
    """Unilm transformer producing seq2seq logits plus a sentence embedding."""

    def __init__(self, pool_method='cls'):
        super().__init__()
        self.pool_method = pool_method
        self.bert = build_transformer_model(
            config_path=config_path,
            checkpoint_path=checkpoint_path,
            with_pool='linear',
            with_mlm='linear',
            application='unilm',
            keep_tokens=keep_tokens,
        )

    def forward(self, token_ids, segment_ids):
        hidden_state, pool_cls, seq_logit = self.bert([token_ids, segment_ids])
        # Positions with token id > 0 are real tokens; use them as the pooling mask.
        pad_mask = token_ids.gt(0).long()
        sen_emb = get_pool_emb(hidden_state, pool_cls, pad_mask, self.pool_method)
        return seq_logit, sen_emb


model = Model(pool_method='cls').to(device)
# Resume from previously saved best weights if present
if os.path.exists(weight_path):
    model.load_weights(weight_path)


class TotalLoss(nn.Module):
    """Two-part loss: seq2seq cross entropy + sentence-similarity cross entropy."""

    def forward(self, outputs, target):
        seq_logit, sen_emb = outputs
        seq_label, seq_mask = target

        # Seq2seq part: predict the second sentence token by token; the mask
        # (segment ids) marks which positions are to be predicted, so padding
        # and the source sentence do not contribute.
        seq2seq_loss = self.compute_loss_of_seq2seq(seq_logit, seq_label, seq_mask)
        # Similarity part: contrast sentence embeddings within the batch.
        similarity_loss = self.compute_loss_of_similarity(sen_emb)

        return {
            'loss': seq2seq_loss + similarity_loss,
            'seq2seq_loss': seq2seq_loss,
            'similarity_loss': similarity_loss,
        }

    def compute_loss_of_seq2seq(self, y_pred, y_true, y_mask):
        """y_pred: [btz, seq_len, hdsz]; y_true, y_mask: [btz, seq_len]."""
        # Shift by one position: the logit at step t predicts token t+1.
        shifted_true = y_true[:, 1:]   # target token ids
        shifted_mask = y_mask[:, 1:]   # 1 where a prediction is required
        shifted_pred = y_pred[:, :-1, :]

        flat_pred = shifted_pred.reshape(-1, shifted_pred.shape[-1])
        # Masked-out positions become 0 and are skipped via ignore_index=0.
        flat_true = (shifted_true * shifted_mask).flatten()
        return F.cross_entropy(flat_pred, flat_true, ignore_index=0)

    def compute_loss_of_similarity(self, y_pred):
        labels = self.get_labels_of_similarity(y_pred)  # soft targets
        normed = F.normalize(y_pred, p=2, dim=-1)       # unit-length embeddings
        similarities = torch.matmul(normed, normed.T)   # cosine similarity matrix
        # Push the diagonal (self-similarity) to -inf so it is never picked.
        similarities = similarities - torch.eye(normed.shape[0], device=device) * 1e12
        similarities = similarities * 30  # temperature scale
        return F.cross_entropy(similarities, labels)

    def get_labels_of_similarity(self, y_pred):
        # Rows 2k and 2k+1 hold the same pair encoded in both orders, so each
        # row's positive example is its neighbour within the pair: 0<->1, 2<->3, ...
        idxs = torch.arange(0, y_pred.shape[0], device=device)
        partner = idxs + 1 - idxs % 2 * 2
        return idxs[None, :].eq(partner[:, None]).float()


def find_top_similar_sentences(target_text, topN=5, file_path="data/input.txt"):
    """Print the topN sentences from *file_path* most similar to *target_text*."""
    # One candidate sentence per line.
    with open(file_path, 'r', encoding='utf-8') as file:
        text_list = [line.strip() for line in file.readlines()]

    # Embed the target together with all candidates; row 0 is the target.
    Z = cal_sen_emb([target_text] + text_list)
    # L2-normalize so dot products become cosine similarities.
    Z /= (Z ** 2).sum(dim=1, keepdim=True) ** 0.5
    similarity = torch.matmul(Z[1:], Z[0])

    # Report the topN closest candidates with their cosine similarity.
    for idx in torch.topk(similarity, k=topN).indices:
        i = idx.item()
        print(f'cos_sim: {similarity[i].item():.4f}, tgt_text: "{target_text}", cal_text: "{text_list[i]}"')


def cal_sen_emb(text_list):
    """Compute sentence embeddings for a list of texts."""
    encoded = [tokenizer.encode(t) for t in text_list]
    token_seqs = [x for x, _ in encoded]
    segment_seqs = [s for _, s in encoded]
    X = torch.tensor(sequence_padding(token_seqs), dtype=torch.long, device=device)
    S = torch.tensor(sequence_padding(segment_seqs), dtype=torch.long, device=device)
    # model.predict returns (seq_logit, sen_emb); keep only the embeddings.
    _, Z = model.predict([X, S])
    return Z


def just_show(some_samples):
    """Spot-check retrieval quality on a randomly chosen sample.

    Best-effort: failures are reported but never abort the caller.
    """
    S = [np.random.choice(some_samples) for _ in range(1)]
    for s in S:
        try:
            find_top_similar_sentences(s, 10)
        except Exception as e:
            # BUG FIX: a bare `except: pass` silently swallowed every failure,
            # including KeyboardInterrupt/SystemExit. Stay best-effort, but
            # surface the reason and let process-exit exceptions propagate.
            print(f'just_show skipped "{s}": {e!r}')


class Evaluator(Callback):
    """Checkpointing callback.

    Saves resume state after every epoch and the final weights at the end of
    training, then demos retrieval quality on a few samples.

    NOTE(review): `self.lowest` tracks the best (lowest) loss but nothing extra
    is saved when it improves — confirm whether best-model saving was intended.
    """

    def __init__(self, optimizer):
        # BUG FIX: the base Callback was never initialized; call super() so any
        # state set up by the framework's Callback.__init__ exists.
        super().__init__()
        self.lowest = 1e10  # best (lowest) training loss seen so far
        self.optimizer = optimizer

    def on_epoch_end(self, global_step, epoch, logs=None):
        # ======= resume support =======
        # Save this epoch's full training state so training can resume later.
        model.save_to_checkpoint(save_ckpt_dir_when_error)

        # Track the best loss
        if logs['loss'] <= self.lowest:
            self.lowest = logs['loss']

    def on_train_end(self, logs: dict = None):
        # Save the final weights
        model.save_weights(f'{weight_path}')
        # Demo the effect on a few samples
        just_show(['花呗有***天宽限期吗', '花呗可以在淘宝上使用吗', '淘宝充红包的金额能和花呗叠加使用吗',
                   '花呗还款了，还显示要还款', '我的花呗需要还款', '我的花呗为什么不让用', '我的支付宝花呗不能用'])


def model_fit(train_dataloader, evaluator=None, early_stopping=None):
    """Run model.fit with crash-mail/checkpoint settings, then send a success mail.

    Args:
        train_dataloader: DataLoader yielding collate_fn batches.
        evaluator: checkpointing callback; built lazily when None.
        early_stopping: early-stopping callback; built lazily when None.

    BUG FIX: the original defaults (`Evaluator(optim.Adam(model.parameters(),
    1e-5))` and `EarlyStopping()`) were evaluated once at import time — an
    optimizer was created even when never used, and the same mutable callback
    objects were shared across every call. Use None sentinels instead.
    """
    assert isinstance(train_dataloader, DataLoader)
    if evaluator is None:
        evaluator = Evaluator(optim.Adam(model.parameters(), 1e-5))
    if early_stopping is None:
        early_stopping = EarlyStopping()

    model.fit(
        train_dataloader,
        epochs=1,
        callbacks=[evaluator, early_stopping],
        mail_receivers_when_error=mail_receivers_when_error,
        mail_host_when_error=mail_host_when_error,
        mail_user_when_error=mail_user_when_error,
        mail_pwd_when_error=mail_pwd_when_error,
        mail_sender_when_error=mail_sender_when_error,
        mail_subject_when_error=mail_subject_when_error,
        save_ckpt_dir_when_error=save_ckpt_dir_when_error,
        save_batch_path_when_error=save_batch_path_when_error
    )

    # Notify on successful completion via the same mail account.
    send_email(mail_receivers_when_error,
               mail_subject_when_success,
               '训练完成',
               mail_host=mail_host_when_error,
               mail_user=mail_user_when_error,
               mail_pwd=mail_pwd_when_error,
               mail_sender=mail_sender_when_error)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='加载已训练的权重路径')
    # BUG FIX: the help text advertised a 'generate' mode that is not implemented.
    parser.add_argument('--choice', type=str, help='train/train4breakpoint/similarity')
    parser.add_argument('--weight_path', type=str, help='预训练权重的路径')
    args = parser.parse_args()

    # BUG FIX: --weight_path was parsed but never used; honor it when given.
    if args.weight_path:
        model.load_weights(args.weight_path)

    # BUG FIX: args.choice was read and then unconditionally overwritten by
    # input(), making the CLI flag useless. Only prompt interactively when no
    # --choice was supplied.
    # train: normal training; train4breakpoint: resume from a checkpoint;
    # similarity: retrieve similar sentences.
    if args.choice:
        choice = args.choice
    else:
        choice = input("选择训练还是断点续训或计算相似度？(train/train4breakpoint/similarity)")
        choice = choice if choice else 'similarity'

    if choice == 'train':
        # Optimizer
        optimizer = optim.Adam(model.parameters(), 1e-5)
        model.compile(loss=TotalLoss(),
                      optimizer=optimizer,
                      metrics=['seq2seq_loss', 'similarity_loss'])

        # Early stopping: stop after 3 epochs without improvement
        early_stopping = EarlyStopping(patience=3, verbose=True)
        evaluator = Evaluator(optimizer)
        model_fit(train_dataloader, evaluator, early_stopping)

    elif choice == 'train4breakpoint':
        assert os.path.exists(save_batch_path_when_error)
        assert os.path.exists(f"{save_ckpt_dir_when_error}/steps_params.pt")
        assert os.path.exists(f"{save_ckpt_dir_when_error}/optimizer.pt")
        # Restore state saved by the previous run:
        # model weights
        model.load_weights(save_batch_path_when_error)
        # training progress (steps/epoch counters) for resuming
        model.load_steps_params(f"{save_ckpt_dir_when_error}/steps_params.pt")
        # optimizer state for resuming
        state_dict = torch.load(f"{save_ckpt_dir_when_error}/optimizer.pt")
        optimizer = optim.Adam(model.parameters(), 1e-5)
        optimizer.load_state_dict(state_dict)

        early_stopping = EarlyStopping(patience=3, verbose=True)
        evaluator = Evaluator(optimizer)
        model.compile(loss=TotalLoss(),
                      optimizer=optimizer,
                      metrics=['seq2seq_loss', 'similarity_loss'])
        model_fit(train_dataloader, evaluator, early_stopping)

    elif choice == 'similarity':
        target_text = "花呗为什么不让用"
        find_top_similar_sentences(target_text, 10)
