import datetime
import os


import torch
import math
import time

from sklearn.model_selection import StratifiedKFold

from Config import Config, Paths, device
from tqdm.auto import tqdm
from Data import collate, Dataset, tokenizer
import numpy as np
from torch.utils.data import DataLoader
from Model import Model
from Utils import LOGGER, get_optimizer_params, sigmoid, get_ROC_AUC, get_F1, get_best_F1
from torch.optim import AdamW
from torch.optim.lr_scheduler import OneCycleLR
import torch.nn as nn
import gc
import pandas as pd
from PlotConfusionMatrix import plot_confusion_matrix
from Process import analyze_df, analyze_text_length_distribution


class AverageMeter(object):
    """Tracks the latest value, running sum, sample count and mean of a metric."""

    def __init__(self):
        # Start from a clean slate (reset at the beginning of each epoch).
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0    # most recent value
        self.avg = 0    # running mean
        self.sum = 0    # running total
        self.count = 0  # number of samples seen

    def update(self, val, n=1):
        """Record ``val`` observed over ``n`` samples and refresh the mean."""
        self.val = val
        self.count += n
        self.sum += n * val
        self.avg = self.sum / self.count


def asMinutes(s):
    """Format a duration given in seconds as a 'Xm Ys' string."""
    minutes, seconds = divmod(s, 60)  # floor-divide into whole minutes + remainder
    return '%dm %ds' % (minutes, seconds)


def timeSince(since, percent):
    """Return '<elapsed> (remain <estimate>)' given a start timestamp.

    ``since`` is a reference time (as from time.time()); ``percent`` is the
    fraction of the total work completed so far (must be non-zero).
    """
    elapsed = time.time() - since
    projected_total = elapsed / percent          # linear extrapolation of total runtime
    remaining = projected_total - elapsed
    return '%s (remain %s)' % (asMinutes(elapsed), asMinutes(remaining))


def train_epoch(train_loader, model, criterion, optimizer, epoch, scheduler, device):
    """Run one training epoch and return the average training loss.

    Uses AMP mixed precision when Config.APEX is enabled and accumulates
    gradients over Config.GRADIENT_ACCUMULATION_STEPS mini-batches before
    each optimizer update (with global grad-norm clipping).
    """

    model.train()
    scaler = torch.cuda.amp.GradScaler(enabled=Config.APEX)  # AMP loss scaler (no-op when APEX is False)
    losses = AverageMeter()
    start = end = time.time()
    global_step = 0  # number of optimizer updates performed

    grad_norm = torch.tensor(0.0).to(device)  # default so logging works before the first clipping step

    with tqdm(train_loader, unit="train_batch", desc='Train') as tqdm_train_loader:
        for step, batch in enumerate(tqdm_train_loader):


            inputs = batch.pop("inputs")
            labels = batch.pop("labels")
            inputs = collate(inputs)
            for k, v in inputs.items():
                inputs[k] = v.to(device)
            labels = labels.to(device)
            batch_size = labels.size(0)

            with torch.cuda.amp.autocast(enabled=Config.APEX):
                y_preds = model(inputs)
                loss = criterion(y_preds, labels.unsqueeze(1))  # unsqueeze so labels match model output shape
                loss = loss / Config.GRADIENT_ACCUMULATION_STEPS  # scale so accumulated grads average correctly
            losses.update(loss.item(), batch_size)
            scaler.scale(loss).backward()



            if (step + 1) % Config.GRADIENT_ACCUMULATION_STEPS == 0: # update parameters once GRADIENT_ACCUMULATION_STEPS batches have accumulated
                scaler.unscale_(optimizer)  # unscale grads before clipping so the norm is in true units

                grad_norm = torch.nn.utils.clip_grad_norm_(
                    model.parameters(),
                    Config.MAX_GRAD_NORM
                )

                scaler.step(optimizer)
                scaler.update()
                optimizer.zero_grad()
                global_step += 1
                if Config.BATCH_SCHEDULER:
                    scheduler.step()  # per-update LR schedule (matches OneCycleLR's steps_per_epoch)


            if (step % Config.PRINT_FREQ == 0 and step != 0) or step == (len(train_loader) - 1):
                print('Epoch: [{0}][{1}/{2}] '

                      'Loss: {loss.avg:.4f} '

                      'Grad: {grad_norm:.4f} (clipped at {max_grad_norm}) '
                      'Encoder-LR:{lrs[0]: .2e} '
                      'Decoder-LR:{lrs[1]: .2e} '


                      .format(epoch + 1, step, len(train_loader), # epoch (1-based), current batch, total batches
                              remain=timeSince(start, float(step + 1) / len(train_loader)),  # NOTE(review): 'remain' is never referenced in the format string above
                              loss=losses,
                              grad_norm=grad_norm.item(), # most recent global gradient norm
                              max_grad_norm=Config.MAX_GRAD_NORM,  # show the clipping threshold

                              lrs=[group['lr'] for group in optimizer.param_groups],
                              ))

    return losses.avg # return the epoch's average loss


def valid_epoch(valid_loader, model, criterion, device):
    """Evaluate the model on ``valid_loader``.

    Returns ``(avg_loss, prediction_dict)`` where prediction_dict holds
    "predictions" (raw model outputs, concatenated over batches) and "ids"
    (sample identifiers aligned one-to-one with the predictions).
    """
    model.eval()  # evaluation mode: disables Dropout / switches BatchNorm to inference behaviour
    losses = AverageMeter()
    prediction_dict = {} # container for the returned predictions/ids
    preds = [] # per-batch prediction arrays, concatenated at the end
    all_ids = []
    start = time.time()  # reference for elapsed/remaining-time estimates
    with tqdm(valid_loader, unit="valid_batch", desc='Validation') as tqdm_valid_loader:
        for step, batch in enumerate(tqdm_valid_loader): # iterate one batch at a time
            inputs = batch.pop("inputs")
            labels = batch.pop("labels")
            ids = batch.pop("ids") # sample identifiers
            all_ids.extend(ids)
            inputs = collate(inputs)

            for k, v in inputs.items():
                inputs[k] = v.to(device)
            labels = labels.to(device)
            batch_size = labels.size(0)

            with torch.no_grad(): # no gradients needed during evaluation
                y_preds = model(inputs)
                loss = criterion(y_preds, labels.unsqueeze(1))  # unsqueeze so labels match model output shape
            if Config.GRADIENT_ACCUMULATION_STEPS > 1:
                loss = loss / Config.GRADIENT_ACCUMULATION_STEPS  # keep loss scale comparable with training
            losses.update(loss.item(), batch_size)  # update running loss
            preds.append(y_preds.to('cpu').numpy())  # move raw outputs to CPU; each entry is shaped [batch_size, 1]
            """
            preds = [
                np.array([[0.1], [0.2], [0.3]]),  # batch 1 (3 samples)
                np.array([[0.4], [0.5]]),         # batch 2 (2 samples)
                np.array([[0.6], [0.7], [0.8]])   # batch 3 (3 samples)
            ]
            """


            if (step % Config.PRINT_FREQ == 0 and step!= 0) or step == (len(valid_loader) - 1): # every PRINT_FREQ steps and on the final batch
                print('EVAL: [{0}/{1}] '
                      'Elapsed {remain:s} '
                      'Loss: {loss.avg:.4f} '
                      .format(step, len(valid_loader),
                              loss=losses, # running average loss
                              remain=timeSince(start, float(step + 1) / len(valid_loader))))

    prediction_dict["predictions"] = np.concatenate(preds)  # merge per-batch arrays into one complete array for later analysis

    prediction_dict["ids"] = all_ids # ids stored in the same order as predictions, keeping results traceable
    return losses.avg, prediction_dict # average loss and prediction dict


def N_Fold_train_loop(folds, fold):
    """Train on every fold except ``fold`` and validate on ``fold``.

    Saves the best checkpoint (selected by validation loss, then ROC-AUC) to
    Paths.OUTPUT_DIR and returns the validation split with a
    sigmoid-transformed "predictions" column from that best checkpoint.
    """
    LOGGER.info(f"—————————————— Fold: is {fold} training ——————————————")

    train_folds = folds[folds['fold'] != fold].reset_index(drop=True) # drop the old index so the new frame is contiguously indexed
    valid_folds = folds[folds['fold'] == fold].reset_index(drop=True)

    valid_labels = valid_folds['generated'].values # ground-truth labels of the validation split

    train_dataset = Dataset(Config, train_folds, tokenizer)
    valid_dataset = Dataset(Config, valid_folds, tokenizer)

    train_loader = DataLoader(train_dataset,
                              batch_size=Config.BATCH_SIZE_TRAIN,
                              shuffle=True,
                              pin_memory=True,
                              drop_last=True)
    valid_loader = DataLoader(valid_dataset,
                              batch_size=Config.BATCH_SIZE_VALID,
                              shuffle=False,
                              pin_memory=True,
                              drop_last=False)

    model = Model(Config, config_path=None, pretrained=True)
    # NOTE(review): hard-coded path — elsewhere this file uses Paths.OUTPUT_DIR; confirm intended.
    torch.save(model.config, "./input/save/config.pth")
    model.to(device)


    optimizer_params = get_optimizer_params(model,
                                                encoder_lr=Config.ENCODER_LR,
                                                decoder_lr=Config.DECODER_LR,
                                                weight_decay=Config.WEIGHT_DECAY)

    optimizer = AdamW(optimizer_params,
                      lr=1e-6,
                      eps=Config.EPS,
                      betas=Config.BETAS)

    scheduler = OneCycleLR(
        optimizer, # the optimizer to drive (AdamW)
        # NOTE(review): three max_lr entries — assumes get_optimizer_params
        # yields exactly 3 param groups (encoder + two decoder groups); verify.
        max_lr = [
            Config.ENCODER_LR*2,
            Config.DECODER_LR*2,
            Config.DECODER_LR*2
        ],
        epochs=Config.EPOCHS, # total number of training epochs
        steps_per_epoch=len(train_loader) // Config.GRADIENT_ACCUMULATION_STEPS, # optimizer updates per epoch (accounts for accumulation)
        pct_start=0.3, # fraction of total steps spent ramping the LR up (30%)
        anneal_strategy="cos", # cosine annealing for the decay phase
        final_div_factor=500
    )

    criterion = nn.BCEWithLogitsLoss() # sigmoid + binary cross-entropy in one numerically-stable op

    best_score = -np.inf

    best_model_predictions=None


    early_stopping_patience = 2 # stop after this many consecutive epochs without val-loss improvement
    no_improve_epochs = 0
    best_val_loss = float('inf')

    for epoch in range(Config.EPOCHS):


        start_time = time.time() # record epoch start time


        avg_loss = train_epoch(train_loader, model, criterion, optimizer, epoch, scheduler, device)


        avg_val_loss, prediction_dict = valid_epoch(valid_loader, model, criterion, device)
        predictions = prediction_dict["predictions"] # raw model outputs for the validation split

        threshold = 0.5
        RA = get_ROC_AUC(valid_labels, sigmoid(predictions)) # evaluation metric (ROC-AUC)
        F1 = get_F1(valid_labels, sigmoid(predictions),threshold)

        elapsed = time.time() - start_time

        LOGGER.info(
            f'Epoch {epoch + 1} - avg_train_loss: {avg_loss:.4f}  avg_val_loss: {avg_val_loss:.4f}  time: {elapsed:.0f}s')
        LOGGER.info(f'Epoch {epoch + 1} - RA: {RA:.4f} - F1: {F1:.4f} - Threshold: {threshold:.4f}')

        if avg_val_loss < best_val_loss:
            best_val_loss = avg_val_loss
            no_improve_epochs = 0

            score = RA

            # Checkpoint only when ROC-AUC also improves (in addition to val loss).
            if score > best_score:
                best_score = score
                LOGGER.info(f'Epoch {epoch + 1} - Save Best Score: {best_score:.4f} Model')
                torch.save(model.state_dict(),
                           os.path.join(Paths.OUTPUT_DIR, f"fold_{fold}_best.pth"))
                best_model_predictions = predictions  # keep the best checkpoint's predictions
        else:
            no_improve_epochs += 1
            if no_improve_epochs >= early_stopping_patience:
                # Early stopping (message: "early stop at epoch {epoch}").
                LOGGER.info(f"早停：在第 {epoch} 轮停止")
                break

    valid_folds["predictions"] = best_model_predictions  # raw logits from the best checkpoint
    valid_folds["predictions"] = valid_folds["predictions"].apply(sigmoid)

    torch.cuda.empty_cache() # release cached GPU memory
    gc.collect()  # trigger Python garbage collection

    return valid_folds # validation split augmented with predictions

def get_result(df):
    """Compute and log ROC-AUC, best-threshold F1 and F1@0.5 for a labelled
    prediction frame (expects 'generated' labels and 'predictions' probabilities)."""
    labels = df["generated"].to_numpy()
    predictions = df["predictions"].to_numpy()
    RA = get_ROC_AUC(labels, predictions)
    best_threshold, best_F1 = get_best_F1(labels, predictions)
    half_F1 = get_F1(labels, predictions, 0.5)
    LOGGER.info(f'RA: {RA:<.4f} - BestF1: {best_F1:<.4f} - BestThreshold: {best_threshold:<.4f} - HalfF1: {half_F1:<.4f}')


if __name__ == '__main__':
    LOGGER.info("—————————————— N-Fold Data Training ——————————————")

    train_df = pd.read_csv(Paths.TRAIN_DATA, sep=',')
    # BUG FIX: sample() returns a shuffled *copy*; the previous code called
    # reset_index(inplace=True) on that copy and discarded it, so train_df was
    # never actually shuffled. Rebind the result so the shuffle takes effect.
    train_df = train_df.sample(frac=1, random_state=Config.SEED).reset_index(drop=True)
    assert {'id', 'text', 'generated'}.issubset(train_df.columns), "数据集缺少必要字段"
    analyze_df(train_df)

    # Stratified split keeps the class ratio of 'generated' similar per fold.
    skf = StratifiedKFold(n_splits=Config.FOLDS, shuffle=True, random_state=Config.SEED)

    train_df['fold'] = -1  # initialise fold assignment

    for fold, (_, valid_idx) in enumerate(skf.split(train_df, train_df['generated'])):
        train_df.loc[valid_idx, 'fold'] = fold

    print("各折类别分布:")
    print(train_df.groupby('fold')['generated'].value_counts().unstack())

    oof_df = pd.DataFrame() # out-of-fold predictions accumulated across all folds

    for fold in range(Config.FOLDS):
        _oof_df = N_Fold_train_loop(train_df, fold)  # train this fold, get its validation predictions
        oof_df = pd.concat([oof_df, _oof_df]) # append this fold's results

        LOGGER.info(f"—————————————— Fold: {fold} result ——————————————")
        get_result(_oof_df)

    oof_df = oof_df.reset_index(drop=True) # reset index so per-fold indices do not clash
    LOGGER.info(f"—————————————— Cross-Validation ——————————————")
    # os.path.join keeps path handling consistent with the checkpoint saves above.
    oof_df.to_csv(os.path.join(Paths.OUTPUT_DIR, 'oof_df.csv'), index=False) # persist OOF predictions
    get_result(oof_df) # overall cross-validation score

    print(oof_df)