# coding: UTF-8
import os
import torch
from sklearn import metrics
import time
from torch.utils.data import DataLoader
import copy
import logging
from transformers import get_linear_schedule_with_warmup
from torch.optim import AdamW
from torch.autograd import Variable
from tqdm import tqdm  # 新增：导入进度条库
from utils import convert_examples_to_features, BuildDataSet

logger = logging.getLogger(__name__)


def train(
        config,  # training configuration object
        model,  # model instance to train
        tokenizer,  # tokenizer used for text preprocessing
        train_data=None,  # list of training examples
        dev_data=None,  # list of validation examples
):
    """Prepare data loaders and launch the training loop.

    Converts raw examples into features, wraps them in ``DataLoader``s and
    delegates the optimization to :func:`model_train`.  When
    ``config.show_progress`` is truthy, a tqdm bar spanning all epochs is
    created and handed to ``model_train``.

    Side effects: sets ``config.train_num_examples`` and
    ``config.dev_num_examples`` so downstream logging can report them.
    """
    # Train on a deep copy so the caller's model instance is left untouched.
    model_example = copy.deepcopy(model).to(config.device)

    if train_data:
        config.train_num_examples = len(train_data)
        # Convert raw examples into model-ready features.
        train_features = convert_examples_to_features(
            examples=train_data,
            tokenizer=tokenizer,
            max_length=config.pad_size,  # maximum sequence length
            data_type='train'
        )
        train_dataset = BuildDataSet(train_features)
        # Training batches are shuffled each epoch.
        train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True)

        if dev_data is not None:
            config.dev_num_examples = len(dev_data)
            dev_features = convert_examples_to_features(
                examples=dev_data,
                tokenizer=tokenizer,
                max_length=config.pad_size,
                data_type='dev'
            )
            dev_dataset = BuildDataSet(dev_features)
            # Validation data is never shuffled.
            dev_loader = DataLoader(dev_dataset, batch_size=config.batch_size, shuffle=False)
        else:
            # Keep the attribute defined so model_train's logging of
            # config.dev_num_examples cannot fail when there is no dev set.
            config.dev_num_examples = 0
            dev_loader = None

        if config.show_progress:
            # One overall progress bar covering every batch of every epoch.
            total_steps = len(train_loader) * config.num_train_epochs
            with tqdm(total=total_steps, desc="Training",
                      bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]") as total_pbar:
                model_train(config, model_example, train_loader, dev_loader, total_pbar)
        else:
            model_train(config, model_example, train_loader, dev_loader)


def model_train(config, model, train_iter, dev_iter=None, total_pbar=None):
    """Run the core optimization loop.

    Args:
        config: training configuration (learning rates, epochs, device, ...).
        model: the model to optimize, already on ``config.device``.
        train_iter: DataLoader yielding
            ``(input_ids, attention_mask, token_type_ids, labels)`` batches.
        dev_iter: optional validation DataLoader; when given, the model is
            evaluated after every epoch and the best checkpoint is saved.
        total_pbar: optional tqdm bar spanning all epochs; advanced one step
            per training batch.
    """
    start_time = time.time()

    # Parameter names that must not receive weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    # Base BERT modules, distinguished so they can use a different
    # learning rate from the classification head.
    diff_part = ["bert.embeddings", "bert.encoder"]
    if config.diff_learning_rate is False:
        # Single learning rate; only split by weight-decay eligibility.
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
                "weight_decay": config.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0
            },
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=config.learning_rate)
    else:
        logger.info("use the diff learning rate")
        # Four groups: (decay / no-decay) x (BERT base / classification head),
        # with the head typically using a larger learning rate.
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in model.named_parameters() if
                           not any(nd in n for nd in no_decay) and any(nd in n for nd in diff_part)],
                "weight_decay": config.weight_decay,
                "lr": config.learning_rate  # base BERT modules
            },
            {
                "params": [p for n, p in model.named_parameters() if
                           any(nd in n for nd in no_decay) and any(nd in n for nd in diff_part)],
                "weight_decay": 0.0,
                "lr": config.learning_rate
            },
            {
                "params": [p for n, p in model.named_parameters() if
                           not any(nd in n for nd in no_decay) and not any(nd in n for nd in diff_part)],
                "weight_decay": config.weight_decay,
                "lr": config.head_learning_rate  # classification head
            },
            {
                "params": [p for n, p in model.named_parameters() if
                           any(nd in n for nd in no_decay) and not any(nd in n for nd in diff_part)],
                "weight_decay": 0.0,
                "lr": config.head_learning_rate
            },
        ]
        optimizer = AdamW(optimizer_grouped_parameters)

    # Total optimization steps = batches per epoch * epochs.
    t_total = len(train_iter) * config.num_train_epochs
    # Linear decay schedule with warmup (warmup step count must be an int).
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=int(t_total * config.warmup_proportion),
        num_training_steps=t_total
    )

    logger.info("***** Running training *****")
    logger.info("  Train Num examples = %d", config.train_num_examples)
    # Default to 0 so logging works even when no dev set was configured.
    logger.info("  Dev Num examples = %d", getattr(config, 'dev_num_examples', 0))
    logger.info("  Num Epochs = %d", config.num_train_epochs)
    logger.info("  Instantaneous batch size GPU/CPU = %d", config.batch_size)
    logger.info("  Total optimization steps = %d", t_total)
    logger.info("  Train device:%s", config.device)

    model_name = config.models_name
    dev_best_acc = 0.  # best validation accuracy seen so far

    for epoch in range(config.num_train_epochs):
        if config.show_progress:
            epoch_train_iter = tqdm(train_iter, desc=f"Epoch {epoch + 1}/{config.num_train_epochs}",
                                    bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]",
                                    leave=False)
        else:
            epoch_train_iter = train_iter

        for input_ids, attention_mask, token_type_ids, labels in epoch_train_iter:
            model.train()

            # Move the batch to the training device.
            input_ids = input_ids.to(config.device)
            attention_mask = attention_mask.to(config.device)
            token_type_ids = token_type_ids.to(config.device)
            labels_tensor = labels.to(config.device)

            # Forward pass: the model returns (logits, loss).
            outputs, loss = model(input_ids, attention_mask, token_type_ids, labels_tensor)

            # Standard step: clear gradients, backprop, update weights and LR.
            model.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()

            if total_pbar is not None:
                total_pbar.update(1)

        if dev_iter is not None:
            dev_acc, dev_loss = model_evaluate(config, model, dev_iter)
            logger.info("epoch %d: dev acc = %.4f, dev loss = %.4f", epoch + 1, dev_acc, dev_loss)
            if dev_acc > dev_best_acc:
                dev_best_acc = dev_acc
                # Checkpoint only when validation accuracy improves.
                model_save(config, model, name='best_' + model_name)

        if config.show_progress:
            epoch_train_iter.close()

    # Guard on the pbar itself: show_progress may be set even though the
    # caller did not supply a bar.
    if total_pbar is not None:
        total_pbar.close()
    logger.info("training finished in %.1fs, best dev acc = %.4f",
                time.time() - start_time, dev_best_acc)


def model_evaluate(config, model, data_iter):
    """Evaluate ``model`` on ``data_iter``.

    Args:
        config: provides ``device`` and ``show_progress``.
        model: model returning ``(logits, loss)`` for a batch.
        data_iter: DataLoader yielding
            ``(input_ids, attention_mask, token_type_ids, labels)`` batches.

    Returns:
        ``(accuracy, mean_loss)`` where accuracy is computed over all
        examples and mean_loss is the loss averaged over batches.
    """
    model.eval()  # switch to evaluation mode (disables dropout etc.)
    loss_total = 0.
    predict_all = []  # predicted class ids over the whole dataset
    labels_all = []  # ground-truth class ids over the whole dataset
    with torch.no_grad():  # no gradients needed for evaluation
        if config.show_progress:
            # tqdm proxies len(), so len(data_iter) below still works.
            data_iter = tqdm(data_iter, desc="Evaluating",
                             bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]")
        for input_ids, attention_mask, token_type_ids, labels in data_iter:
            input_ids = input_ids.to(config.device)
            attention_mask = attention_mask.to(config.device)
            token_type_ids = token_type_ids.to(config.device)
            labels_tensor = labels.to(config.device)

            # Forward pass; the loss is only accumulated for reporting.
            outputs, loss = model(input_ids, attention_mask, token_type_ids, labels_tensor)
            # Predicted class = argmax over the class dimension.
            predic = torch.max(outputs, 1)[1].cpu()
            # Store plain Python ints rather than 0-dim tensors.
            predict_all.extend(predic.tolist())
            labels_all.extend(labels.tolist())
            loss_total += loss.item()
        dev_acc = metrics.accuracy_score(labels_all, predict_all)
    # Mean loss per batch.
    return dev_acc, loss_total / len(data_iter)


def model_save(config, model, name=None):
    """Save the model's ``state_dict`` under ``config.save_path``.

    Args:
        config: provides ``save_path`` (directory) and ``save_file``
            (default base file name).
        model: torch module whose parameters are saved.
        name: optional base file name overriding ``config.save_file``.
    """
    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs(config.save_path, exist_ok=True)
    base_name = name if name is not None else config.save_file
    file_name = os.path.join(config.save_path, base_name + '.pkl')
    # Persist parameters only (not the full module object).
    torch.save(model.state_dict(), file_name)
    logger.info("model saved, path: %s", file_name)


def model_load(config, model, device='cpu'):
    """Load saved parameters into ``model`` in place.

    Args:
        config: provides ``save_path`` and ``save_file`` locating the
            checkpoint written by :func:`model_save`.
        model: torch module receiving the loaded ``state_dict``.
        device: ``'cpu'`` or a device type string (e.g. ``'cuda'``);
            non-CPU devices are mapped to index 0.
    """
    checkpoint_path = os.path.join(config.save_path, config.save_file + '.pkl')
    logger.info('loading model: %s', checkpoint_path)
    # Resolve the map_location target before loading.
    if device == 'cpu':
        target = device
    else:
        target = "{}:{}".format(device, 0)
    state_dict = torch.load(checkpoint_path, map_location=target)
    model.load_state_dict(state_dict)