import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import AIModel
from config import data_transforms, get_train_val_datasets, ROOT_DIR
import os
import torch.cuda
from sklearn.metrics import roc_auc_score, precision_score, recall_score, f1_score, confusion_matrix
import numpy as np
import json
from datetime import datetime

def calculate_metrics(y_true, y_pred, y_prob):
    """Compute binary-classification evaluation metrics.

    Args:
        y_true: Ground-truth labels (0/1), array-like.
        y_pred: Hard predictions; kept for interface compatibility but
            unused — thresholded labels are re-derived from ``y_prob``.
        y_prob: Predicted positive-class probabilities, array-like.

    Returns:
        dict with 'auc', 'precision', 'recall', 'f1', 'specificity'
        and the raw 2x2 'confusion_matrix' (numpy array).
    """
    # Threshold probabilities at 0.5 to obtain hard labels.
    y_pred_binary = (y_prob >= 0.5).astype(int)

    auc = roc_auc_score(y_true, y_prob)
    # zero_division=0 yields the same 0.0 value as before but silences
    # sklearn's UndefinedMetricWarning when a class is never predicted.
    precision = precision_score(y_true, y_pred_binary, zero_division=0)
    recall = recall_score(y_true, y_pred_binary, zero_division=0)
    f1 = f1_score(y_true, y_pred_binary, zero_division=0)
    # Fixing labels=[0, 1] guarantees a 2x2 matrix so ravel() below
    # always unpacks to (tn, fp, fn, tp), even if the predictions
    # contain only one class.
    conf_matrix = confusion_matrix(y_true, y_pred_binary, labels=[0, 1])

    tn, fp, fn, tp = conf_matrix.ravel()

    # Specificity = TN / (TN + FP); guard against a zero denominator
    # (no true-negative class present at all).
    specificity = tn / (tn + fp) if (tn + fp) > 0 else 0.0

    return {
        'auc': auc,
        'precision': precision,
        'recall': recall,
        'f1': f1,
        'specificity': specificity,
        'confusion_matrix': conf_matrix
    }

def print_metrics(metrics, phase):
    """Pretty-print a metrics dict produced by calculate_metrics.

    Args:
        metrics: Mapping with 'auc', 'precision', 'recall', 'f1',
            'specificity' and 'confusion_matrix' entries.
        phase: Label for the dataset being reported (e.g. train/val).
    """
    print(f"\n{phase} 评估指标:")
    # Scalar metrics, each rendered with four decimal places.
    scalar_lines = (
        f"AUC: {metrics['auc']:.4f}",
        f"精确率: {metrics['precision']:.4f}",
        f"召回率: {metrics['recall']:.4f}",
        f"F1分数: {metrics['f1']:.4f}",
        f"特异度: {metrics['specificity']:.4f}",
    )
    for line in scalar_lines:
        print(line)
    print("\n混淆矩阵:")
    print(metrics['confusion_matrix'])

def save_metrics(metrics_dict, save_dir):
    """Persist training metrics to a timestamped JSON file.

    Args:
        metrics_dict: Nested dict of training metrics; may contain
            numpy arrays and numpy scalar types.
        save_dir: Directory to write into (created if missing).

    Returns:
        str: Path of the JSON file that was written.
    """
    os.makedirs(save_dir, exist_ok=True)

    # Timestamped file name so successive runs never overwrite each other.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"training_metrics_{timestamp}.json"
    filepath = os.path.join(save_dir, filename)

    def convert_numpy(obj):
        """Recursively turn numpy containers/scalars into plain Python types."""
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # np.generic covers numpy scalars (np.float32, np.int64, ...),
        # which json.dump cannot serialize directly.
        if isinstance(obj, np.generic):
            return obj.item()
        if isinstance(obj, dict):
            return {key: convert_numpy(value) for key, value in obj.items()}
        if isinstance(obj, list):
            return [convert_numpy(item) for item in obj]
        return obj

    metrics_dict = convert_numpy(metrics_dict)
    with open(filepath, 'w', encoding='utf-8') as f:
        json.dump(metrics_dict, f, ensure_ascii=False, indent=4)

    print(f"训练指标已保存到: {filepath}")
    return filepath

class EarlyStopping:
    """Stop training when validation loss stops improving.

    Tracks the negated validation loss; when it fails to improve by at
    least ``min_delta`` for ``patience`` consecutive calls, ``early_stop``
    is set to True. Model weights are checkpointed on every improvement.
    """

    def __init__(self, patience=7, min_delta=0.001, verbose=True, path=None):
        """
        Args:
            patience: Number of consecutive non-improving epochs tolerated
                before ``early_stop`` is raised.
            min_delta: Minimum decrease in validation loss that counts
                as an improvement.
            verbose: Whether to print progress messages.
            path: Checkpoint file path. When None, falls back to
                ``best_model.pth`` under ROOT_DIR (the previous
                hard-coded behavior).
        """
        self.patience = patience
        self.min_delta = min_delta
        self.verbose = verbose
        self.path = path
        self.counter = 0          # consecutive epochs without improvement
        self.best_score = None    # best (negated) validation loss seen so far
        self.early_stop = False
        self.val_loss_min = float('inf')

    def __call__(self, val_loss, val_auc, model, epoch):
        # Negate the loss so that "higher score is better".
        score = -val_loss

        if self.best_score is None:
            # First call: establish the baseline and checkpoint immediately.
            self.best_score = score
            self.save_checkpoint(val_loss, val_auc, model, epoch)
        elif score < self.best_score + self.min_delta:
            # No sufficient improvement this epoch.
            self.counter += 1
            if self.verbose:
                print(f'早停计数器: {self.counter}/{self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            # Improved: record the new best, checkpoint, reset the counter.
            self.best_score = score
            self.save_checkpoint(val_loss, val_auc, model, epoch)
            self.counter = 0

    def save_checkpoint(self, val_loss, val_auc, model, epoch):
        """Save model weights for the current best validation loss."""
        if self.verbose:
            print(f'验证损失从 {self.val_loss_min:.4f} 改善到 {val_loss:.4f}')
        if self.path is not None:
            model_save_path = self.path
        else:
            model_save_path = os.path.join(ROOT_DIR, 'best_model.pth')
        torch.save(model.state_dict(), model_save_path)
        self.val_loss_min = val_loss

def train_model(model, train_loader, val_loader, criterion, optimizer, device, num_epochs=20, scheduler=None):
    """Train the model with per-epoch metrics, LR scheduling and early stopping.

    Args:
        model: Network assumed to output one probability per sample
            (shape (B, 1) or (B,)) — TODO confirm against AIModel, since
            BCELoss and a 0.5 threshold are applied to its raw output.
        train_loader / val_loader: DataLoaders yielding (inputs, labels).
        criterion: Loss function (e.g. nn.BCELoss).
        optimizer: Optimizer updating the model's parameters.
        device: torch.device to run on.
        num_epochs: Maximum number of epochs.
        scheduler: Optional scheduler stepped with the validation loss
            (ReduceLROnPlateau-style interface).

    Returns:
        dict: Full training history (losses, per-epoch metrics, best AUC,
        timing and model info), also saved to JSON under ROOT_DIR.
    """
    best_val_auc = 0.0

    # Stop after 10 epochs without a >= 1e-4 validation-loss improvement;
    # also checkpoints the best model internally.
    early_stopping = EarlyStopping(patience=10, min_delta=0.0001, verbose=True)

    # Accumulates everything that is later dumped to JSON.
    training_history = {
        'epochs': [],
        'train_loss': [],
        'val_loss': [],
        'train_metrics': [],
        'val_metrics': [],
        'best_val_auc': 0.0,
        'best_epoch': 0,
        'training_time': 0,
        'early_stopping_epoch': 0,
        'model_info': {
            'model_name': model.__class__.__name__,
            'device': str(device),
            'optimizer': optimizer.__class__.__name__,
            'learning_rate': optimizer.param_groups[0]['lr'],
            'num_epochs': num_epochs,
            'batch_size': train_loader.batch_size,
            'train_size': len(train_loader.dataset),
            'val_size': len(val_loader.dataset)
        }
    }

    # Report hardware and enable cuDNN autotuning on GPU.
    if torch.cuda.is_available():
        print(f"使用GPU: {torch.cuda.get_device_name(0)}")
        print(f"GPU显存: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.2f}GB")
        # benchmark=True lets cuDNN pick the fastest kernels for fixed
        # input shapes, at the cost of bitwise determinism.
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.deterministic = False
    else:
        print("使用CPU训练")

    start_time = datetime.now()

    for epoch in range(num_epochs):
        # ---- Training phase ----
        model.train()
        running_loss = 0.0
        all_labels = []
        all_predictions = []
        all_probabilities = []

        for inputs, labels in tqdm(train_loader, desc=f'Epoch {epoch+1}/{num_epochs} - Training'):
            inputs = inputs.to(device)
            labels = labels.float().to(device)

            optimizer.zero_grad()
            # reshape(-1) instead of squeeze(): squeeze() turns a trailing
            # batch of size 1 into a 0-dim tensor, which breaks the loss
            # shape match and the list.extend() over numpy below.
            outputs = model(inputs).reshape(-1)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

            # detach() before numpy conversion: these tensors carry grads.
            all_labels.extend(labels.cpu().numpy())
            all_predictions.extend((outputs.detach() >= 0.5).float().cpu().numpy())
            all_probabilities.extend(outputs.detach().cpu().numpy())

        train_loss = running_loss / len(train_loader)

        train_metrics = calculate_metrics(
            np.array(all_labels),
            np.array(all_predictions),
            np.array(all_probabilities)
        )

        # ---- Validation phase ----
        model.eval()
        val_loss = 0.0
        all_labels = []
        all_predictions = []
        all_probabilities = []

        with torch.no_grad():
            for inputs, labels in tqdm(val_loader, desc=f'Epoch {epoch+1}/{num_epochs} - Validation'):
                inputs = inputs.to(device)
                labels = labels.float().to(device)

                outputs = model(inputs).reshape(-1)  # see note above re squeeze()
                loss = criterion(outputs, labels)

                val_loss += loss.item()

                all_labels.extend(labels.cpu().numpy())
                all_predictions.extend((outputs >= 0.5).float().cpu().numpy())
                all_probabilities.extend(outputs.cpu().numpy())

        val_loss = val_loss / len(val_loader)

        val_metrics = calculate_metrics(
            np.array(all_labels),
            np.array(all_predictions),
            np.array(all_probabilities)
        )

        # Record this epoch in the history.
        training_history['epochs'].append(epoch + 1)
        training_history['train_loss'].append(float(train_loss))
        training_history['val_loss'].append(float(val_loss))
        training_history['train_metrics'].append(train_metrics)
        training_history['val_metrics'].append(val_metrics)

        # Track the best validation AUC. Previously these fields were
        # never updated, so the saved history always reported
        # best_val_auc=0 / best_epoch=0.
        if val_metrics['auc'] > best_val_auc:
            best_val_auc = val_metrics['auc']
            training_history['best_val_auc'] = float(best_val_auc)
            training_history['best_epoch'] = epoch + 1

        print(f'\nEpoch [{epoch+1}/{num_epochs}]')
        print(f'Training Loss: {train_loss:.4f}')
        print_metrics(train_metrics, "训练集")
        print(f'Validation Loss: {val_loss:.4f}')
        print_metrics(val_metrics, "验证集")

        # ReduceLROnPlateau steps on the monitored validation loss.
        if scheduler is not None:
            scheduler.step(val_loss)

        # Early-stopping check (also saves the best checkpoint).
        early_stopping(val_loss, val_metrics['auc'], model, epoch + 1)

        if early_stopping.early_stop:
            print("触发早停！")
            training_history['early_stopping_epoch'] = epoch + 1
            break

        print('-' * 60)

    # Total wall-clock training time.
    training_history['training_time'] = (datetime.now() - start_time).total_seconds()

    metrics_save_dir = os.path.join(ROOT_DIR, 'training_metrics')
    save_metrics(training_history, metrics_save_dir)

    return training_history

def main():
    """Entry point: build datasets/loaders, set up model and optimizer, train."""
    # Prefer GPU when available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # Dataset lives under <project root>/dataset.
    dataset_path = os.path.join(ROOT_DIR, 'dataset')
    print(f"项目根目录: {ROOT_DIR}")
    print(f"数据集路径: {dataset_path}")

    train_dataset, val_dataset = get_train_val_datasets(dataset_path)
    print(f"训练集大小: {len(train_dataset)}")
    print(f"验证集大小: {len(val_dataset)}")

    # DataLoaders: pinned memory + persistent workers speed up host->GPU
    # transfer and avoid re-spawning workers each epoch.
    train_loader = DataLoader(
        train_dataset,
        batch_size=32,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
        persistent_workers=True
    )

    val_loader = DataLoader(
        val_dataset,
        batch_size=32,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
        persistent_workers=True
    )

    model = AIModel().to(device)

    # Resume from a previous best checkpoint if one exists.
    model_path = os.path.join(ROOT_DIR, 'best_model.pth')
    if os.path.exists(model_path):
        print(f"加载已训练的模型: {model_path}")
        # map_location makes a GPU-saved checkpoint loadable on a
        # CPU-only machine (and vice versa); without it torch.load
        # raises when the saving device is unavailable.
        model.load_state_dict(torch.load(model_path, map_location=device))
    else:
        print("未找到已训练的模型，将从头训练")

    # BCELoss assumes the model already outputs probabilities
    # (i.e. a sigmoid at the end of AIModel) — TODO confirm in model.py.
    criterion = nn.BCELoss()
    optimizer = optim.AdamW(model.parameters(), lr=0.0001, weight_decay=0.01)

    # Halve the LR after 3 epochs without validation-loss improvement.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        mode='min',
        factor=0.5,
        patience=3
    )

    training_history = train_model(
        model,
        train_loader,
        val_loader,
        criterion,
        optimizer,
        device,
        scheduler=scheduler
    )

if __name__ == "__main__":
    main()
