import os, torch
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from torch.optim.lr_scheduler import CosineAnnealingLR, LinearLR, SequentialLR
from torch.nn.utils import clip_grad_norm_

def _build_scheduler(optimizer, epochs):
    """Build an LR scheduler from side-channel config stored in param_groups[0].

    Recognized keys: 'scheduler' ('cosine' is the only supported value),
    'min_lr' (eta_min, default 1e-6), 'warmup_epochs' (linear warmup length).
    Returns None when no (supported) scheduler is configured.
    """
    # BUG FIX: the original left `scheduler` unbound (NameError later) when the
    # 'scheduler' key existed but held an unsupported value — default to None.
    scheduler = None
    if hasattr(optimizer, 'param_groups') and 'scheduler' in optimizer.param_groups[0]:
        scheduler_type = optimizer.param_groups[0].get('scheduler', None)
        min_lr = optimizer.param_groups[0].get('min_lr', 1e-6)
        warmup_epochs = optimizer.param_groups[0].get('warmup_epochs', 0)

        if scheduler_type == 'cosine':
            main_scheduler = CosineAnnealingLR(optimizer, T_max=epochs - warmup_epochs, eta_min=min_lr)
            if warmup_epochs > 0:
                # Linear warmup from 10% of the base LR, then hand off to cosine.
                warmup_scheduler = LinearLR(
                    optimizer,
                    start_factor=0.1,
                    end_factor=1.0,
                    total_iters=warmup_epochs
                )
                scheduler = SequentialLR(
                    optimizer,
                    schedulers=[warmup_scheduler, main_scheduler],
                    milestones=[warmup_epochs]
                )
            else:
                scheduler = main_scheduler
    return scheduler


def _evaluate(model, device, loader, criterion):
    """Return the average criterion loss of `model` over `loader` (no grad)."""
    model.eval()
    total_loss = 0
    with torch.no_grad():
        for data, target in loader:
            data, target = data.to(device), target.to(device)
            output = model(data.float())
            loss = criterion(output, target.float())
            total_loss += loss.item()
    return total_loss / len(loader)


def _save_training_plots(fig_save_dir, epochs_list, train_losses, val_losses, lr_history):
    """Write loss / validation-loss / LR curves as PNGs into `fig_save_dir`."""
    os.makedirs(fig_save_dir, exist_ok=True)

    sns.set(style="darkgrid")

    # Combined training / validation loss curves.
    plt.figure(figsize=(12, 6))
    plt.plot(epochs_list, train_losses, 'b-', label='Training Loss')
    plt.plot(epochs_list, val_losses, 'r-', label='Validation Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training and Validation Loss Curves')
    plt.legend()
    plt.grid(True)
    loss_curve_path = os.path.join(fig_save_dir, 'loss_curve.png')
    plt.savefig(loss_curve_path)
    plt.close()

    # Validation loss alone, with the best epoch marked.
    plt.figure(figsize=(12, 6))
    plt.plot(epochs_list, val_losses, 'g-')
    plt.xlabel('Epoch')
    plt.ylabel('Validation Loss')
    plt.title('Validation Loss Curve')
    best_epoch = val_losses.index(min(val_losses)) + 1
    plt.axvline(x=best_epoch, color='r', linestyle='--', label=f'Best Model (Epoch {best_epoch})')
    plt.legend()
    plt.grid(True)
    val_loss_curve_path = os.path.join(fig_save_dir, 'val_loss_curve.png')
    plt.savefig(val_loss_curve_path)
    plt.close()

    # Learning-rate schedule (only recorded when a scheduler was active).
    if lr_history:
        plt.figure(figsize=(12, 6))
        plt.plot(epochs_list, lr_history, 'b-')
        plt.xlabel('Epoch')
        plt.ylabel('Learning Rate')
        plt.title('Learning Rate Curve')
        plt.yscale('log')  # log scale shows LR decay more clearly
        plt.grid(True)
        lr_curve_path = os.path.join(fig_save_dir, 'lr_curve.png')
        plt.savefig(lr_curve_path)
        plt.close()


def train(model, device, train_loader, val_loader, test_loader, criterion, optimizer, epochs, patience, model_save_dir, fig_save_dir):
    """Train `model` with early stopping, checkpointing and diagnostic plots.

    Extra configuration is read from `optimizer.param_groups[0]` as a
    side-channel: 'clip_grad_norm' (gradient-clipping threshold),
    'scheduler' / 'min_lr' / 'warmup_epochs' (LR schedule — see
    `_build_scheduler`).

    Args:
        model: torch.nn.Module (assumed already on `device` — confirm with caller).
        device: torch device the batches are moved to.
        train_loader, val_loader, test_loader: DataLoaders yielding
            (data, target) pairs; inputs/targets are cast to float.
        criterion: loss callable taking (output, target).
        optimizer: torch optimizer (plus optional config keys above).
        epochs: maximum number of epochs.
        patience: epochs without val-loss improvement before early stop.
        model_save_dir: directory for .pth checkpoints (created if missing).
        fig_save_dir: directory for saved figures (created if missing).

    Returns:
        (model_save_dir, fig_save_dir, train_info) — the two directories and a
        human-readable summary string. On return, `model` holds the
        best-validation-loss weights.
    """
    best_val_loss = float('inf')

    # Per-epoch metric history for plotting.
    train_losses = []
    val_losses = []
    epochs_list = []
    lr_history = []
    best_model_path = None

    # BUG FIX: ensure the checkpoint directory exists before the first save
    # (the original only created fig_save_dir).
    os.makedirs(model_save_dir, exist_ok=True)

    # Optional gradient-clipping threshold from the optimizer config.
    clip_grad_norm_value = None
    if hasattr(optimizer, 'param_groups') and 'clip_grad_norm' in optimizer.param_groups[0]:
        clip_grad_norm_value = optimizer.param_groups[0].get('clip_grad_norm')

    scheduler = _build_scheduler(optimizer, epochs)

    counter = 0  # early-stopping counter

    for epoch in range(epochs):
        # --- training pass ---
        model.train()
        total_train_loss = 0
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data.float())
            loss = criterion(output, target.float())
            loss.backward()

            # Apply gradient clipping if a threshold was configured.
            if clip_grad_norm_value is not None:
                clip_grad_norm_(model.parameters(), clip_grad_norm_value)

            optimizer.step()
            total_train_loss += loss.item()
        avg_train_loss = total_train_loss / len(train_loader)

        # --- LR schedule step (per epoch) ---
        if scheduler is not None:
            scheduler.step()
            current_lr = optimizer.param_groups[0]['lr']
            lr_history.append(current_lr)

        # --- validation pass ---
        avg_val_loss = _evaluate(model, device, val_loader, criterion)

        train_losses.append(avg_train_loss)
        val_losses.append(avg_val_loss)
        epochs_list.append(epoch + 1)

        lr_info = f", LR: {optimizer.param_groups[0]['lr']:.6f}"
        print(f'Epoch: {epoch+1}/{epochs}, Train Loss: {avg_train_loss:.6f}, Val Loss: {avg_val_loss:.6f}{lr_info}')

        if avg_val_loss < best_val_loss:
            # New best: save it and delete the previous best checkpoint.
            last_best_model_path = best_model_path
            best_model_path = f"{model_save_dir}/best_{avg_val_loss:.6f}.pth"
            best_val_loss = avg_val_loss
            if last_best_model_path: os.remove(last_best_model_path)
            torch.save(model.state_dict(), best_model_path)
            print(f'Epoch {epoch+1}: Saved best model, validation loss: {best_val_loss:.6f}')
            counter = 0  # reset early-stopping counter
        else:
            counter += 1  # no improvement

        if counter >= patience:
            print(f'Early stopping: validation loss did not improve for {patience} consecutive epochs')
            break

        # Periodic checkpoint every 50 epochs.
        if (epoch + 1) % 50 == 0:
            checkpoint_path =f'{model_save_dir}/epoch_{epoch+1}_{avg_val_loss:.6f}.pth'
            torch.save(model.state_dict(), checkpoint_path)
            print(f'Epoch {epoch+1}: Saved checkpoint')

    final_model_path = f'{model_save_dir}/final_{avg_val_loss:.6f}.pth'
    torch.save(model.state_dict(), final_model_path)

    # Restore the best weights and measure test loss with them.
    # map_location keeps the restore valid regardless of the saving device.
    model.load_state_dict(torch.load(best_model_path, map_location=device))
    avg_test_loss = _evaluate(model, device, test_loader, criterion)

    _save_training_plots(fig_save_dir, epochs_list, train_losses, val_losses, lr_history)

    train_info = (f"Training completed, total epochs: {epoch+1}/{epochs}\n"
                     f"Best validation loss: {best_val_loss:.6f}\n"
                     f"Test loss: {avg_test_loss:.6f}\n"
                     f"Plots saved to: {fig_save_dir}")
    return model_save_dir, fig_save_dir, train_info