# -*- coding:utf8 -*-
# @Time : 2022/10/20 4:17 下午
# @Author : WanJie Wu


# Standard library
import argparse
import datetime
import os
import random
from dataclasses import dataclass

# Third-party
import numpy as np
import torch
import torch.nn.functional as F
from loguru import logger
from scipy.stats import spearmanr
from tensorboardX import SummaryWriter
from torch.backends import cudnn
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.optim import Adam
from transformers import BertTokenizer

# Local
from datasets import init_data_loader
from models import SimCseModel


def setup_seed(seed):
    """Seed every RNG this training run touches so results are reproducible.

    Covers the stdlib ``random`` module, numpy, the torch CPU generator,
    all torch CUDA generators, and pins cuDNN to deterministic convolution
    algorithm selection.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    cudnn.deterministic = True


def simcse_sup_loss(y_pred, args):
    """Supervised SimCSE loss.

    ``y_pred`` has shape ``(batch_size * 3, embedding_dim)`` laid out as
    repeating (anchor, positive, negative) triples: rows 3i, 3i+1, 3i+2.
    Every anchor is scored against all non-anchor rows with cosine
    similarity and the task is treated as classification — the anchor's own
    positive should receive the highest score, e.g.:

                s0              s1                        s2
        s0    -inf     sim(s0, s1) high, ~1      sim(s0, s2) low, ~0
        s1  high, ~1          -inf               sim(s1, s2) low, ~0
        s2  low, ~0         low, ~0                     -inf

    The diagonal is pushed toward -inf so a sentence is never compared
    with itself.
    """
    total = y_pred.shape[0]
    # Anchor rows sit at indices 0, 3, 6, ...
    anchor_idx = torch.arange(0, total, 3).to(args.device)
    # Candidate (positive + negative) rows are everything else: 1, 2, 4, 5, ...
    all_idx = torch.arange(total).to(args.device)
    cand_idx = torch.where(all_idx % 3 != 0)[0].to(args.device)
    # Within the candidate columns, anchor i's positive lands at column 2i.
    target = torch.arange(0, len(cand_idx), 2).to(args.device)
    # Pairwise cosine similarity matrix, shape (total, total).
    sim = F.cosine_similarity(y_pred.unsqueeze(1), y_pred.unsqueeze(0), dim=2)
    # Mask the diagonal so self-similarity cannot dominate the softmax.
    sim = sim - torch.eye(total).to(args.device) * 1e12
    # Keep anchor rows and candidate columns only, then temperature-scale.
    sim = torch.index_select(sim, 0, anchor_idx)
    sim = torch.index_select(sim, 1, cand_idx)
    sim = sim / torch.tensor(args.temperature, dtype=torch.float)
    return F.cross_entropy(sim, target)


def simcse_unsup_loss(y_pred, args):
    """Unsupervised SimCSE loss.

    ``y_pred`` contains ``batch_size * 2`` embeddings where rows 2i and
    2i+1 are two encodings of the same sentence.  Each row's target is its
    partner row; every other row in the batch acts as an in-batch negative.
    """
    total = y_pred.shape[0]
    # Map each even index to its successor and each odd index to its
    # predecessor: 0->1, 1->0, 2->3, 3->2, ...
    labels = torch.arange(total, device=args.device)
    labels = (labels - labels % 2 * 2) + 1
    # Pairwise cosine similarities with the self-similarity diagonal masked.
    sim = F.cosine_similarity(y_pred.unsqueeze(1), y_pred.unsqueeze(0), dim=2)
    sim = sim - torch.eye(total, device=args.device) * 1e12
    # Temperature scaling sharpens the softmax distribution.
    sim = sim / torch.tensor(args.temperature, dtype=torch.float)
    return F.cross_entropy(sim, labels)


def evaluation(model, dev_loader, args):
    """Score the model on a labelled sentence-pair dataset.

    Encodes each (source, target) pair, measures the cosine similarity of
    the two embeddings, and returns the Spearman rank correlation between
    the predicted similarities and the gold labels.

    Side effect: leaves ``model`` in eval mode — callers that keep
    training must switch it back themselves.
    """
    logger.info("开始数据集评估...")
    model.eval()
    all_sims = np.array([])
    all_labels = np.array([])
    with torch.no_grad():
        for source, target, label in dev_loader:
            def encode(feats):
                # Each feature tensor arrives as (batch, 1, seq); drop dim 1.
                ids = feats["input_ids"].squeeze(1).to(args.device)
                mask = feats["attention_mask"].squeeze(1).to(args.device)
                types = feats["token_type_ids"].squeeze(1).to(args.device)
                return model(ids, mask, types, args.output_hidden_states)

            sims = F.cosine_similarity(encode(source), encode(target), dim=-1)
            all_sims = np.append(all_sims, sims.cpu().numpy())
            all_labels = np.append(all_labels, label.cpu().numpy())
    return spearmanr(all_labels, all_sims).correlation


@dataclass
class MidVariables:
    """Mutable training-loop bookkeeping shared between main() and train_epoch()."""

    model_file_name: str = ""  # file name of the best checkpoint saved so far ("" = none yet)
    cur_epoch: int = 0  # 1-based index of the epoch currently running
    global_step: int = 0  # total optimizer steps taken across all epochs
    best_corrcoef: float = 0.0  # best dev-set Spearman correlation observed so far


def train_epoch(model, train_loader, dev_loader, optimizer, writer, args, mid_vars):
    """Run one training epoch with periodic dev-set evaluation.

    For every batch the (batch, views, seq) input is flattened to
    (batch * views, seq), pushed through the model, and scored with the
    supervised or unsupervised SimCSE loss depending on ``args.mode``.
    Every ``args.evaluation_interval_steps`` steps the model is evaluated
    on ``dev_loader`` and, when the Spearman correlation improves, the
    checkpoint is written to ``args.model_output_dir``.

    Side effects: advances ``mid_vars.global_step``, updates
    ``mid_vars.best_corrcoef`` / ``mid_vars.model_file_name``, and logs
    scalars to ``writer``.
    """
    model.train()
    for batch_idx, item in enumerate(train_loader):
        seq_length = item["input_ids"].size(-1)

        # Collapse the (batch, views, seq) layout into (batch * views, seq).
        input_ids = item["input_ids"].view(-1, seq_length).to(args.device)
        attention_mask = item["attention_mask"].view(-1, seq_length).to(args.device)
        token_type_ids = item["token_type_ids"].view(-1, seq_length).to(args.device)

        out = model(input_ids, attention_mask, token_type_ids, args.output_hidden_states)
        if args.mode == "sup":
            loss = simcse_sup_loss(out, args)
        else:
            loss = simcse_unsup_loss(out, args)
        optimizer.zero_grad()
        loss.backward()
        # BUGFIX: args.max_grad_norm configures gradient-NORM clipping (per its
        # name and help text), but the original code passed it to
        # clip_grad_value_, which clips each gradient element to
        # [-value, value] — a different operation.  Use clip_grad_norm_ so the
        # parameter means what it says.
        clip_grad_norm_(model.parameters(), args.max_grad_norm)
        optimizer.step()
        mid_vars.global_step += 1
        logger.info(f"global_steps: {mid_vars.global_step}, loss: {loss.cpu().item():.4f}")
        writer.add_scalar("train_loss", loss, mid_vars.global_step)

        # Only evaluate every evaluation_interval_steps optimizer steps.
        if mid_vars.global_step % args.evaluation_interval_steps != 0:
            continue

        corrcef = evaluation(model, dev_loader, args)
        writer.add_scalar("corrcoef", corrcef, mid_vars.global_step)
        model.train()  # evaluation() switched the model into eval mode
        if mid_vars.best_corrcoef < corrcef:
            logger.info("正在保存最优模型")
            mid_vars.best_corrcoef = corrcef
            mid_vars.model_file_name = f"dropout[{args.dropout}]_pooler[{args.pooler}]_spearman[{round(corrcef, 3)}].pkl"
            torch.save(model.state_dict(), os.path.join(args.model_output_dir, mid_vars.model_file_name))


def main(args):
    """End-to-end training entry point.

    1. Build the SimCSE model and tokenizer.
    2. Load the train / dev / test data loaders.
    3. Configure the optimizer.
    4. Train, tracking the best dev checkpoint, then report test Spearman.
    """
    writer = SummaryWriter(logdir=args.summary_log_dir)
    model = SimCseModel(pretrained_model=args.model_name_or_path, pooling=args.pooler, dropout=args.dropout)
    model.to(args.device)

    tokenizer = BertTokenizer.from_pretrained(args.model_name_or_path)
    mid_vars = MidVariables()
    train_dataset, dev_dataset, test_dataset = init_data_loader(
        tokenizer=tokenizer,
        train_path=args.train_path,
        dev_path=args.dev_path,
        test_path=args.test_path,
        batch_size=args.batch_size,
        max_seq_length=args.max_seq_length,
        mode=args.mode
    )
    # BUGFIX: guard against a zero interval (len(train_dataset) smaller than
    # evaluation_per_epoch), which would make `global_step % interval` raise
    # ZeroDivisionError inside train_epoch.
    args.evaluation_interval_steps = max(1, int(len(train_dataset) / args.evaluation_per_epoch))
    optimizer = Adam(model.parameters(), lr=args.learning_rate)

    logger.info("开始模型训练")
    for _ in range(args.epochs):
        mid_vars.cur_epoch += 1
        logger.info(f"--- train start epoch {mid_vars.cur_epoch}---\n")
        train_epoch(model, train_dataset, dev_dataset, optimizer, writer, args, mid_vars)

    # BUGFIX: if no evaluation ever beat the initial best_corrcoef, no
    # checkpoint was written and loading "" would fail with a confusing error.
    if not mid_vars.model_file_name:
        logger.warning("no checkpoint was saved during training; skipping test evaluation")
        return
    model.load_state_dict(torch.load(os.path.join(args.model_output_dir, mid_vars.model_file_name)))
    corrcef = evaluation(model, test_dataset, args)
    logger.info(f"test:  \t{corrcef}")

def add_dataset_arguments(parser):
    """Register dataset / path command-line options and return the parser."""
    group = parser.add_argument_group("dataset", description="路径相关配置参数")
    required_paths = [
        ("--train_path", "模型训练集路径"),
        ("--dev_path", "模型验证集路径"),
        ("--test_path", "模型测试集路径"),
        ("--output_dir", "模型保存目录"),
        ("--model_name_or_path", "文本BERT基类模型路径"),
    ]
    # All path options are mandatory strings; register them table-driven.
    for flag, help_text in required_paths:
        group.add_argument(flag, type=str, required=True, help=help_text)
    return parser


def add_simcse_arguments(parser):
    """Register SimCSE-specific options (training mode, temperature, pooling)."""
    group = parser.add_argument_group("simcse", description="simcse模型相关配置")
    group.add_argument(
        "--mode",
        type=str,
        choices=["sup", "unsup"],
        default="sup",
        help="训练模式，分别包含监督和非监督",
    )
    group.add_argument(
        "--temperature",
        type=float,
        default=0.05,
        help="温度系数",
    )
    group.add_argument(
        "--pooler",
        type=str,
        choices=["cls", "pooler", "last-avg", "first-last-avg"],
        default="cls",
        help="向量获取方式",
    )
    return parser


def add_hyper_parameters_arguments(parser):
    """Register generic training hyper-parameter options and return the parser."""
    group = parser.add_argument_group("hyper_params", description="通用超参数配置")
    # (flag, value type, default, help) — registration order matters for --help.
    options = [
        ("--epochs", int, 1, "训练轮次"),
        ("--batch_size", int, 64, "训练批次大小"),
        ("--learning_rate", float, 3e-5, "学习速率"),
        ("--max_seq_length", int, 64, "最大序列长度"),
        ("--max_grad_norm", float, 1.0, "最大梯度裁剪"),
        ("--dropout", float, 0.1,
         "sup监督模式时，dropout值和config.json配置里面一致，默认为0.1；非监督可加大dropout，建议值为0.3"),
        ("--evaluation_per_epoch", int, 5, "每个Epoch评估多少次"),
    ]
    for flag, value_type, default, help_text in options:
        group.add_argument(flag, type=value_type, default=default, help=help_text)
    return parser


def get_arguments():
    """Parse command-line arguments and derive runtime settings.

    On top of the raw CLI options this sets the torch device, the
    ``output_hidden_states`` flag required by first-last-avg pooling, and
    timestamped model-output / TensorBoard-log directories.
    """
    parser = argparse.ArgumentParser()
    parser = add_dataset_arguments(parser)
    parser = add_simcse_arguments(parser)
    parser = add_hyper_parameters_arguments(parser)
    args = parser.parse_args()

    args.device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")

    # first-last-avg pooling needs every hidden layer from the encoder.
    args.output_hidden_states = args.pooler == "first-last-avg"

    # BUGFIX: the timestamp was previously formatted twice; if the minute
    # ticked over between the two strftime calls, model_output_dir and
    # summary_log_dir pointed at different directories and model_output_dir
    # was never created (torch.save in train_epoch would then fail).
    # Format once so both paths share the same run directory.
    run_stamp = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M")
    args.model_output_dir = os.path.join(args.output_dir, run_stamp)
    summary_log_dir = os.path.join(args.model_output_dir, "logs")
    if not os.path.isdir(summary_log_dir):
        # makedirs creates model_output_dir as the parent as well.
        os.makedirs(summary_log_dir)
    args.summary_log_dir = summary_log_dir
    return args


if __name__ == "__main__":
    # Pin CUDA device enumeration to PCI bus order and expose only GPU 0,
    # so "cuda:0" refers to a stable physical device.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # Fixed seed so repeated runs are comparable.
    setup_seed(1)
    parser_args = get_arguments()
    main(parser_args)
