"""
主要训练文件，实现了模型训练、验证和测试的完整流程。
包含以下核心功能：
1. 梯度裁剪 - 稳定训练过程，避免梯度爆炸
2. 多损失记录 - 包括CE Loss、Global/Local MI Loss等
3. 动态模型保存 - 根据验证集AUC动态保存最佳模型
4. 多重评估指标 - 包括AUC、ACC、LogLoss等
5. 测试模式支持 - 通过--test参数启用
6. Tensorboard可视化 - 实时监控训练过程
"""

import shutil

import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import os
import sys
import logging
import numpy as np
from tqdm import tqdm
from sklearn.metrics import roc_auc_score, accuracy_score, log_loss

from losses.mi_loss import *
from parameters import parse_args
from dataset import ReadDataset, MyDataset, mixup_data, mixup_criterion
from utils import plt_tensorboard,remove_prefix,create_table
from models.MI_Net import MI_Net

import warnings
warnings.simplefilter("ignore", UserWarning)
class train_and_test_model():
    """End-to-end training / validation / test driver for MI_Net.

    Responsibilities:
    1. Gradient clipping to keep training stable (avoid exploding gradients).
    2. Logging of every loss term (CE loss, global MI loss, local MI loss).
    3. Saving the best checkpoint according to validation AUC.
    4. Multiple evaluation metrics (AUC, ACC, LogLoss).
    5. Test-only mode support (via the --test flag).
    6. Tensorboard visualisation of the whole run.
    """

    def __init__(self, args):
        """Build the model, datasets, loaders, loss function and optimizer.

        Args:
            args: parsed command-line configuration (see parameters.py).
        """
        self.test_loss = []
        self.best_AUC = 0
        self.start_epoch = 1
        self.plt_tb = plt_tensorboard(args)
        self.args = args

        # CUDA availability check and performance-oriented settings.
        self.use_cuda = torch.cuda.is_available() and args.gpu_num != '-1'
        if self.use_cuda:
            torch.backends.cudnn.benchmark = True
            torch.backends.cudnn.deterministic = False
            torch.cuda.empty_cache()
            # Reserve most of the GPU memory for this process.
            torch.cuda.set_per_process_memory_fraction(0.95)

        self.device = torch.device("cuda" if self.use_cuda else "cpu")

        # Network - ResNet backbone used as the base feature extractor.
        self.net = MI_Net(
            model='resnet',  # base model family
            num_regions=args.num_LIBs,  # number of local information blocks
            num_classes=2,  # binary task: real / fake
            freeze_fc=False,
            dropout=args.dropout if hasattr(args, 'dropout') else 0.5
        )

        if self.use_cuda:
            self.device_ids = list(map(int, args.gpu_num.split(',')))
            if len(self.device_ids) > 1:
                self.net = nn.DataParallel(self.net, device_ids=self.device_ids)
            self.net = self.net.cuda(self.device)
            logging.info(f"Using GPU: {self.device} - {torch.cuda.get_device_name()}")
            logging.info(f"Available GPU memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.2f} GB")
        else:
            logging.info("Using CPU for training")

        # Dataset preparation - splits are provided by ReadDataset.
        # All three splits share the ImageNet normalization statistics.
        imagenet_stats = {"mean": [0.485, 0.456, 0.406],
                          "std": [0.229, 0.224, 0.225]}
        self.dataset = ReadDataset(args.dataset)
        self.train_dataset = MyDataset(
            self.dataset.data['train'],
            self.dataset.labels['train'],
            size=args.size,
            test=False,  # training split: augmentation mode enabled
            normalize=imagenet_stats
        )
        self.val_dataset = MyDataset(
            self.dataset.data['val'],
            self.dataset.labels['val'],
            size=args.size,
            test=True,
            normalize=imagenet_stats
        )
        self.test_dataset = MyDataset(
            self.dataset.data['test'],
            self.dataset.labels['test'],
            size=args.size,
            test=True,
            normalize=imagenet_stats
        )

        # Data loader configuration.
        # BUGFIX: the original bare ``except:`` clauses hid the real cause of
        # a failure; catch Exception and log the traceback instead.
        try:
            self.train_loader = DataLoader(
                self.train_dataset,
                shuffle=True,
                batch_size=args.bs,
                num_workers=args.num_workers if self.use_cuda else 0,
                pin_memory=self.use_cuda
            )
            logging.info(f"Train dataset size: {len(self.train_dataset)}")
        except Exception:
            logging.exception("Failed to create train_loader")

        try:
            self.val_loader = DataLoader(
                self.val_dataset,
                shuffle=False,
                batch_size=args.test_bs,
                num_workers=2 if self.use_cuda else 0,
                pin_memory=self.use_cuda
            )
            logging.info(f"Validation dataset size: {len(self.val_dataset)}")
        except Exception:
            logging.exception("Failed to create val_loader")

        try:
            self.test_loader = DataLoader(
                self.test_dataset,
                shuffle=False,
                batch_size=args.test_bs,
                num_workers=2 if self.use_cuda else 0,
                pin_memory=self.use_cuda
            )
            logging.info(f"Test dataset size: {len(self.test_dataset)}")
        except Exception:
            logging.exception("Failed to create test_loader")

        # Loss configuration.
        self.loss_function = loss_functions(
            method='mi',
            mi_calculator=self.args.mi_calculator,
            temperature=self.args.temperature,
            bml_method=self.args.balance_loss_method,
            scales=self.args.scales,
            lil_loss=self.args.lil_loss,  # local information loss
            gil_loss=self.args.gil_loss,  # global information loss
            device=self.device
        )

        # Optimizer is created lazily by update_lr() so it is built exactly
        # once and its AdamW state survives later learning-rate updates.
        self.optimizer = None

        if self.args.resume_model:
            self.load_model(self.args.resume_model)

        # Optimizer configuration - AdamW (created on the first call).
        self.update_lr()

    def load_model(self, path):
        """Restore network weights and the epoch counter from ``path``.

        Handles checkpoints saved from a DataParallel wrapper by stripping
        the ``module.`` prefix when a direct load fails.
        """
        logging.info(f'Resuming training from {path}')
        loaded_params = torch.load(f"{path}",
                                   map_location=torch.device(self.device))
        state_dict = loaded_params['state_dict']
        try:
            self.net.load_state_dict(state_dict)
        except RuntimeError:
            # Checkpoint keys carry a 'module.' prefix (DataParallel save).
            state_dict = remove_prefix(state_dict, 'module.')
            self.net.load_state_dict(state_dict)
        self.start_epoch = loaded_params['epoch'] + 1

        logging.warning(f"Loaded parameters from saved model: current epoch is"
                        f" {self.start_epoch}")

    def _compute_lr(self, epoch):
        """Return the scheduled learning rate for ``epoch``.

        Schedule: gentle exponential warmup for 10 epochs, then a slow cosine
        decay (kept high until epoch 25, softened afterwards), floored at 5%
        of the base learning rate.
        """
        if epoch < 10:  # warmup phase
            # Power 1.5 makes the ramp-up gentler than linear.
            lr = self.args.lr * (epoch / 10) ** 1.5
        else:
            # Slow cosine annealing.
            progress = (epoch - 10) / (self.args.epoch - 10)
            if epoch < 25:
                # Keep the learning rate high during the main phase.
                lr = self.args.lr * (0.5 * (1 + np.cos(progress * np.pi * 0.2)))
            else:
                # Soften the decay over the final 7 epochs as well.
                final_progress = (epoch - 25) / 7
                lr = self.args.lr * 0.5 * (1 + np.cos((0.2 + 0.6 * final_progress) * np.pi))
            lr = max(lr, self.args.lr * 0.05)  # learning-rate floor
        return lr

    def update_lr(self, epoch=None):
        """Create the optimizer (first call) or update its learning rate.

        Args:
            epoch: epoch to schedule for; defaults to ``self.start_epoch``
                so existing ``update_lr()`` callers keep working.

        BUGFIX (two related defects in the original):
        1. It always read ``self.start_epoch`` — which never changes during
           training — so the learning rate stayed frozen for the whole run.
        2. It rebuilt the AdamW optimizer on every call, throwing away the
           first/second-moment state each epoch. The optimizer is now built
           once and only ``param_groups[0]['lr']`` is updated afterwards.
        """
        if epoch is None:
            epoch = self.start_epoch
        lr = self._compute_lr(epoch)
        if self.optimizer is None:
            self.optimizer = torch.optim.AdamW([
                {'params': self.net.parameters(), 'lr': lr,
                 'weight_decay': self.args.weight_decay,
                 'betas': (0.95, 0.999)},  # larger momentum, as before
                # NOTE(review): this group has no explicit 'lr', so it uses
                # the AdamW default — presumably intentional; confirm.
                {'params': self.loss_function.balance_loss.parameters(),
                 'weight_decay': self.args.weight_decay}
            ])
        else:
            self.optimizer.param_groups[0]['lr'] = lr

    def train(self):
        """Main training loop.

        Per epoch: update the LR schedule, run over the training loader with
        optional mixup, clip gradients, log losses, validate, and checkpoint
        the model whenever the validation AUC improves.
        """
        epbar = tqdm(total=self.args.epoch)
        logging.info(f"Starting Training...")
        for epoch in range(self.start_epoch, self.args.epoch + self.start_epoch):
            # BUGFIX: pass the running epoch; the original parameterless call
            # always scheduled for start_epoch, freezing the learning rate.
            self.update_lr(epoch)
            self.net.train()
            avg_loss = []
            avg_ce_loss = []
            avg_global_mi_loss = []
            avg_local_loss = []
            self.plt_tb.reset_metrics()

            for i, (data, y) in enumerate(self.train_loader):
                # BUGFIX: ``.to`` works on both CPU and GPU; the original
                # ``.cuda(self.device)`` crashed when training on CPU.
                data = data.to(self.device)
                y = y.to(self.device)

                # Reduce mixup probability during the first 15 epochs so the
                # model first learns from the original (unmixed) samples.
                mixup_prob = min(0.8, epoch / 15 * 0.8) if epoch < 15 else 0.8
                if self.args.mixup and np.random.random() < mixup_prob:
                    data, y_a, y_b, lam = mixup_data(data, y, self.args.alpha)
                    out = self.net(data)
                    losses = mixup_criterion(self.loss_function.criterion, out, y_a, y_b, lam)
                else:
                    out = self.net(data)
                    losses = self.loss_function.criterion(out, y)

                # Loss-weight schedule: CE dominates early, MI terms ramp up.
                if epoch < 10:  # warmup phase
                    # Gentle CE-weight decay (power 1.5 softens the slope).
                    ce_weight = 0.95 - 0.05 * (epoch / 10) ** 1.5
                    loss = losses[0] * ce_weight + self.loss_function.balance_mult_loss(losses) * (1 - ce_weight)
                elif epoch < 25:  # main training phase
                    # MI-loss weight grows linearly from 0.2 up to 0.7.
                    progress = (epoch - 10) / 15
                    mi_weight = 0.2 + 0.5 * progress
                    loss = losses[0] * (1 - mi_weight) + self.loss_function.balance_mult_loss(losses) * mi_weight
                else:  # final phase: fixed 0.35 / 0.65 split
                    loss = losses[0] * 0.35 + self.loss_function.balance_mult_loss(losses) * 0.65

                if torch.isnan(loss).any():
                    logging.info("loss is NAN, so stop training...")
                    sys.exit()

                self.optimizer.zero_grad()
                loss.backward()

                # Gradient clipping keeps the update magnitudes bounded.
                torch.nn.utils.clip_grad_norm_(self.net.parameters(), max_norm=1.0)

                self.optimizer.step()

                avg_loss.append(loss.item())
                avg_ce_loss.append(losses[0].item())
                if self.args.gil_loss:
                    avg_global_mi_loss.append(losses[1].item())
                if self.args.lil_loss:
                    avg_local_loss.append(losses[-1].item())

                self.plt_tb.accumulate_metrics(out['p_y_given_z'], y, loss)

                if i % 10 == 0:
                    log_info = f"Epoch {epoch}, Step {i}, Loss: {np.mean(avg_loss):.4f}, CE: {np.mean(avg_ce_loss):.4f}"
                    if self.args.gil_loss:
                        log_info += f", MI: {np.mean(avg_global_mi_loss):.4f}"
                    if self.args.lil_loss:
                        log_info += f", Local: {np.mean(avg_local_loss):.4f}"
                    logging.info(log_info)

            epbar.update(1)
            metric = self.plt_tb.report_metrics(epoch,
                                                tb_writer=self.plt_tb.tb_writer,
                                                tb_prefix=f'Training')
            logging.info(f"Epoch: {epoch} Training Average loss: {np.mean(avg_loss):.4f}")

            # Validate every epoch; keep the checkpoint with the best AUC.
            val_metrics = self.test(self.net, epoch, val=True)
            if val_metrics['AUC'] > self.best_AUC:
                self.best_AUC = val_metrics['AUC']
                self.save_model(epoch, best=True)
                logging.info(f"New best model saved! Validation AUC: {self.best_AUC:.4f}")

            # Evaluate on the test split for monitoring only (no checkpoint).
            self.test(self.net, epoch, val=False)

    def test(self, net, epoch, val=False):
        """Evaluate ``net`` on the validation or test split.

        Args:
            net: model to evaluate.
            epoch: current epoch (used for logging / Tensorboard).
            val: if True evaluate on the validation split, else on test.

        Returns:
            dict with 'AUC', 'ACC' (percentage), 'LogLoss' and 'Loss'.
        """
        net.eval()
        self.plt_tb.reset_metrics()
        avg_total_loss = []
        avg_ce_loss = []
        avg_global_mi_loss = []
        avg_local_loss = []
        all_preds = []
        all_labels = []

        with torch.no_grad():
            if val:
                loader = self.val_loader
                dataset_type = "Validation"
            else:
                loader = self.test_loader
                dataset_type = "Test"

            for i, (data, y) in tqdm(enumerate(loader)):
                data = data.to(self.device)
                y = y.to(self.device)
                out = net(data)
                losses = self.loss_function.criterion(out, y)
                loss = self.loss_function.balance_mult_loss(losses)

                # Collect class-1 probabilities and ground-truth labels.
                probs = torch.softmax(out['p_y_given_z'], dim=1)
                all_preds.extend(probs[:, 1].cpu().numpy())
                all_labels.extend(y.cpu().numpy())

                avg_total_loss.append(loss.item())
                avg_ce_loss.append(losses[0].item())
                if self.args.gil_loss:
                    avg_global_mi_loss.append(losses[1].item())
                if self.args.lil_loss:
                    avg_local_loss.append(losses[-1].item())

        all_preds = np.array(all_preds)
        all_labels = np.array(all_labels)

        # AUC
        auc = roc_auc_score(all_labels, all_preds)

        # Accuracy at a fixed 0.5 threshold.
        pred_labels = (all_preds > 0.5).astype(int)
        acc = accuracy_score(all_labels, pred_labels)

        # LogLoss (clip probabilities away from 0/1 to avoid log(0)).
        eps = 1e-15
        all_preds = np.clip(all_preds, eps, 1 - eps)
        logloss = log_loss(all_labels, all_preds)

        metrics = {
            'AUC': auc,
            'ACC': acc * 100,  # percentage
            'LogLoss': logloss,
            'Loss': np.mean(avg_total_loss)
        }

        # Tensorboard logging.
        prefix = "Validation" if val else "Test"
        self.plt_tb.tb_writer.add_scalar(f'{prefix}/AUC_value', metrics['AUC'], epoch)
        self.plt_tb.tb_writer.add_scalar(f'{prefix}/Accuracy_value', metrics['ACC'], epoch)
        self.plt_tb.tb_writer.add_scalar(f'{prefix}/LogLoss_value', metrics['LogLoss'], epoch)
        self.plt_tb.tb_writer.add_scalar(f'{prefix}/Loss_value', metrics['Loss'], epoch)
        self.plt_tb.tb_writer.flush()

        # Text logging.
        log_info = f"\n{dataset_type} Results - Epoch {epoch}:\n"
        log_info += f"AUC: {metrics['AUC']:.4f}\n"
        log_info += f"ACC: {metrics['ACC']:.2f}%\n"
        log_info += f"LogLoss: {metrics['LogLoss']:.4f}\n"
        log_info += f"Loss: {metrics['Loss']:.4f}"
        if self.args.gil_loss:
            log_info += f"\nGlobal MI Loss: {np.mean(avg_global_mi_loss):.4f}"
        if self.args.lil_loss:
            log_info += f"\nLocal MI Loss: {np.mean(avg_local_loss):.4f}"
        logging.info(log_info)

        return metrics

    def save_model(self, epoch, best=False):
        """Persist a checkpoint (state dict + epoch).

        Args:
            epoch: current epoch, stored inside the checkpoint.
            best: if True save as ``model_best.pth`` instead of a per-epoch
                file.
        """
        if self.args.save_model:
            save_dir = f"{self.args.save_path}/{self.args.name}"
            # BUGFIX: torch.save fails if the target directory is missing.
            os.makedirs(save_dir, exist_ok=True)
            logging.info(f"Saving model to {self.args.save_path}/{self.args.name}.")
            saved_dict = {'state_dict': self.net.state_dict(),
                          'epoch': epoch}
            if best:
                model_name = f"{save_dir}/model_best.pth"
                logging.info(f"Saving best model.")
            else:
                model_name = f"{save_dir}/model_{epoch}.pth"
            torch.save(saved_dict, model_name)



def unNormalize(tensor, mean, std):
    """Invert a channel-wise normalization in place.

    Reverses ``t = (t - mean) / std`` by applying ``t * std + mean`` to each
    channel (first-dimension slice) of ``tensor``. The tensor is mutated and
    also returned for convenience.
    """
    # Mirror zip()'s behavior: stop at the shortest of the three sequences.
    n_channels = min(len(tensor), len(mean), len(std))
    for idx in range(n_channels):
        tensor[idx].mul_(std[idx]).add_(mean[idx])
    return tensor
if __name__ == "__main__":
    # Entry point:
    # 1. Parse command-line arguments.
    # 2. Support both training and test-only (--test) modes.
    # 3. Configure file logging (training mode only).
    # 4. Print detailed test results in test-only mode.
    args = parse_args()
    print(args)
    if not args.test:
        if args.name:
            os.makedirs(f"output/{args.name}", exist_ok=True)
        # BUGFIX: logging.basicConfig raises FileNotFoundError when the log
        # directory does not exist; create it first.
        os.makedirs("./logs", exist_ok=True)
        logging.basicConfig(filename=f"./logs/{args.name}.log",
                            filemode="w",
                            format='[%(asctime)s]%(levelname)s:%(message)s',
                            datefmt='%Y.%m.%d %I:%M:%S %p',
                            level=logging.INFO, )
        logging.warning(create_table(args))

    print(args.name)
    print('Mutual Information Calculator is :' + args.mi_calculator)
    # BUGFIX: removed leftover debug output ("print('dsadada', args.test)").

    train_model = train_and_test_model(args)
    if not args.test:
        train_model.train()
        # Reload the best checkpoint and report final test-set performance.
        train_model.load_model(os.path.join(args.save_path, args.name, 'model_best.pth'))
        train_model.test(train_model.net, 0, val=False)
    else:
        print(f"\n正在加载最佳模型进行测试: {args.resume_model}")
        metrics = train_model.test(train_model.net, 0, val=False)
        print("\n========== 测试结果 ==========")
        print(f"AUC: {metrics['AUC']:.4f}")
        print(f"ACC: {metrics['ACC']:.2f}%")
        print(f"LogLoss: {metrics['LogLoss']:.4f}")
        print(f"Loss: {metrics['Loss']:.4f}")
        print("==============================\n")