import datetime
import os


import torch
import math
import time

from sklearn.model_selection import StratifiedKFold

from Config import Config, Paths, device
from tqdm.auto import tqdm
from Data import collate, Dataset, tokenizer
import numpy as np
from torch.utils.data import DataLoader
from Model import Model
from N_Fold_Train import AverageMeter, timeSince, train_epoch, valid_epoch
from Utils import LOGGER, get_optimizer_params, sigmoid, get_ROC_AUC, get_F1, get_best_F1
from torch.optim import AdamW
from torch.optim.lr_scheduler import OneCycleLR
import torch.nn as nn
import gc
import pandas as pd
from PlotConfusionMatrix import plot_confusion_matrix
from Process import analyze_df, analyze_text_length_distribution

def Full_train_loop(train_df, valid_df):
    """Train on the full training set, validate each epoch, keep the best model.

    Early-stops after ``early_stopping_patience`` consecutive epochs without
    validation-loss improvement. Among loss-improving epochs, the checkpoint
    with the best ROC-AUC is saved to ``Paths.OUTPUT_DIR/the_full.pth``.

    Args:
        train_df: training DataFrame (must contain a 'generated' label column).
        valid_df: validation DataFrame with the same schema; mutated in place —
            a 'predictions' column (sigmoid probabilities from the best model)
            is added before returning.

    Returns:
        valid_df with the 'predictions' column attached.
    """
    LOGGER.info("—————————————— Full training ——————————————")

    valid_labels = valid_df['generated'].values  # ground-truth labels for metric computation

    train_dataset = Dataset(Config, train_df, tokenizer)
    valid_dataset = Dataset(Config, valid_df, tokenizer)

    train_loader = DataLoader(train_dataset,
                              batch_size=Config.BATCH_SIZE_TRAIN,
                              shuffle=True,
                              pin_memory=True,
                              drop_last=True)
    valid_loader = DataLoader(valid_dataset,
                              batch_size=Config.BATCH_SIZE_VALID,
                              shuffle=False,
                              pin_memory=True,
                              drop_last=False)

    model = Model(Config, config_path=None, pretrained=True)
    # Persist the model config so inference can rebuild the architecture later.
    torch.save(model.config, "./input/save/config.pth")
    model.to(device)

    optimizer_parameters = get_optimizer_params(model,
                                                encoder_lr=Config.ENCODER_LR,
                                                decoder_lr=Config.DECODER_LR,
                                                weight_decay=Config.WEIGHT_DECAY)

    optimizer = AdamW(optimizer_parameters,
                      lr=1e-6,  # placeholder initial lr; OneCycleLR drives the lr per step
                      eps=Config.EPS,
                      betas=Config.BETAS)

    # One-cycle schedule with per-parameter-group peak learning rates
    # (encoder group + two decoder groups, matching get_optimizer_params).
    scheduler = OneCycleLR(
        optimizer,
        max_lr=[
            Config.ENCODER_LR * 2,
            Config.DECODER_LR * 2,
            Config.DECODER_LR * 2
        ],
        epochs=Config.EPOCHS,
        # Optimizer steps per epoch (gradient accumulation reduces step count).
        steps_per_epoch=len(train_loader) // Config.GRADIENT_ACCUMULATION_STEPS,
        pct_start=0.3,  # 30% of steps spent ramping the lr up (original comment wrongly said 10%)
        anneal_strategy="cos",  # cosine annealing on the way down
        final_div_factor=500
    )

    criterion = nn.BCEWithLogitsLoss()  # sigmoid + BCE fused, numerically stable (log-sum-exp)

    best_score = -np.inf
    best_model_predictions = None

    early_stopping_patience = 2  # stop after this many epochs without val-loss improvement
    no_improve_epochs = 0
    best_val_loss = float('inf')

    for epoch in range(Config.EPOCHS):
        start_time = time.time()

        avg_loss = train_epoch(train_loader, model, criterion, optimizer, epoch, scheduler, device)

        avg_val_loss, prediction_dict = valid_epoch(valid_loader, model, criterion, device)
        predictions = prediction_dict["predictions"]  # raw model outputs (logits)

        threshold = 0.5
        RA = get_ROC_AUC(valid_labels, sigmoid(predictions))  # ROC-AUC on probabilities
        F1 = get_F1(valid_labels, sigmoid(predictions), threshold)

        elapsed = time.time() - start_time

        LOGGER.info(
            f'Epoch {epoch + 1} - avg_train_loss: {avg_loss:.4f}  avg_val_loss: {avg_val_loss:.4f}  time: {elapsed:.0f}s')
        LOGGER.info(f'Epoch {epoch + 1} - RA: {RA:.4f} - F1: {F1:.4f} - Threshold: {threshold:.4f}')

        if avg_val_loss < best_val_loss:
            best_val_loss = avg_val_loss
            no_improve_epochs = 0

            score = RA
            if score > best_score:
                best_score = score
                LOGGER.info(f'Epoch {epoch + 1} - Save Best Score: {best_score:.4f} Model')
                torch.save(model.state_dict(),
                           os.path.join(Paths.OUTPUT_DIR, "the_full.pth"))
                best_model_predictions = predictions  # keep the best checkpoint's raw logits
        else:
            no_improve_epochs += 1
            if no_improve_epochs >= early_stopping_patience:
                # Fix: report the 1-based epoch number, consistent with the logs above.
                LOGGER.info(f"早停：在第 {epoch + 1} 轮停止")
                break

    # The stored predictions are raw logits — convert to probabilities.
    valid_df["predictions"] = best_model_predictions
    valid_df["predictions"] = valid_df["predictions"].apply(sigmoid)

    torch.cuda.empty_cache()  # release cached GPU memory fragments
    gc.collect()  # trigger Python garbage collection

    return valid_df

def get_result(df):
    """Log ROC-AUC, best-threshold F1, and F1 at threshold 0.5 for a scored dataframe.

    Expects `df` to carry a 'generated' column of true 0/1 labels and a
    'predictions' column of model probabilities.
    """
    y_true = df["generated"].values
    y_prob = df["predictions"].values

    RA = get_ROC_AUC(y_true, y_prob)
    best_threshold, best_F1 = get_best_F1(y_true, y_prob)
    half_F1 = get_F1(y_true, y_prob, 0.5)

    LOGGER.info(f'RA: {RA:<.4f} - BestF1: {best_F1:<.4f} - BestThreshold: {best_threshold:<.4f} - HalfF1: {half_F1:<.4f}')

if __name__ == '__main__':
    LOGGER.info("—————————————— Full Data Training ——————————————")

    # Columns every input dataset must provide.
    required_columns = {'id', 'text', 'generated'}

    # Load and shuffle the training data.
    # BUG FIX: the original chained .sample(...).reset_index(..., inplace=True),
    # which mutated only the temporary shuffled copy and silently discarded the
    # shuffle — the result must be assigned back to the variable.
    train_df = pd.read_csv(Paths.TRAIN_DATA, sep=',')
    train_df = train_df.sample(frac=1, random_state=Config.SEED).reset_index(drop=True)
    assert required_columns.issubset(train_df.columns), "数据集缺少必要字段"
    analyze_df(train_df)

    # Same load + shuffle + validation for the held-out set.
    valid_df = pd.read_csv(Paths.VALID_DATA, sep=',')
    valid_df = valid_df.sample(frac=1, random_state=Config.SEED).reset_index(drop=True)
    assert required_columns.issubset(valid_df.columns), "数据集缺少必要字段"
    analyze_df(valid_df)

    # Train on the full data; returns valid_df with a 'predictions' column.
    results_df = Full_train_loop(train_df, valid_df)

    LOGGER.info(f"—————————————— Full:  result ——————————————")
    results_df.to_csv(os.path.join(Paths.OUTPUT_DIR, 'full_results.csv'), index=False)
    get_result(results_df)

    print(results_df)
    plot_confusion_matrix(results_df, 0.5)