import torch
import copy
from torch.utils.tensorboard import SummaryWriter
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import numpy as np
import cv2

def adjust_learning_rate(optimizer, gamma, global_step, base_lr=1e-3):
    """Set every param group's learning rate to ``base_lr * gamma ** global_step``.

    Args:
        optimizer: optimizer whose param groups are updated in place.
        gamma: multiplicative decay factor.
        global_step: decay exponent (e.g. the number of decay events so far).
        base_lr: learning rate before any decay is applied. Defaults to 1e-3,
            matching the previously hard-coded value, so existing callers see
            identical behavior.

    Returns:
        The same optimizer object, for call-site chaining.
    """
    lr = base_lr * (gamma ** global_step)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return optimizer

def train_one_epoch(model, train_loader, optimizer, criterion, device):
    """Train the model for one epoch on a two-task (main + auxiliary) objective.

    Args:
        model: network whose forward returns (main_logits, aux_logits, _, _).
        train_loader: yields (images, labels); each sample carries two targets
            (main-task label, auxiliary-task label).
        optimizer: optimizer stepping the model's parameters.
        criterion: classification loss applied to each head's logits.
        device: torch device tensors are moved to.

    Returns:
        dict with batch-averaged losses ('main_loss', 'aux_loss'),
        sample-averaged accuracies ('main_acc', 'aux_acc'), and the total
        number of 'samples' processed.
    """
    model.train()
    total_loss1 = 0.0
    total_loss2 = 0.0
    total_task1_acc = 0
    total_task2_acc = 0
    count = 0

    for batch_idx, (images, labels) in enumerate(train_loader):
        # Model expects NHWC, single-channel 256x256 input.
        images = images.view(-1, 256, 256, 1).to(device)
        # Use the actual batch size rather than a hard-coded 64 so the final
        # (possibly smaller) batch of the epoch does not crash. The previous
        # torch.squeeze is dropped too: for a batch of one it collapsed the
        # [1, 2] label tensor to 1-D and broke the labels[:, k] indexing below.
        labels = labels.view(images.shape[0], -1).to(device)  # [batch, 2]

        optimizer.zero_grad()
        main_task_output, aux_task_output, _, _ = model(images)

        # Task 1 (main): loss and number of correct predictions in this batch.
        loss1 = criterion(main_task_output, labels[:, 0])
        task1_label = torch.max(main_task_output, dim=1)[1]
        task1_acc = torch.eq(task1_label, labels[:, 0]).sum().item()

        # Task 2 (auxiliary): loss and number of correct predictions.
        loss2 = criterion(aux_task_output, labels[:, 1])
        task2_label = torch.max(aux_task_output, dim=1)[1]
        task2_acc = torch.eq(task2_label, labels[:, 1]).sum().item()

        # Fixed weighting: the main task dominates the combined objective.
        loss = 0.9 * loss1 + 0.1 * loss2
        loss.backward()
        optimizer.step()

        # Accumulate epoch totals.
        count += images.shape[0]
        total_loss1 += loss1.item()
        total_loss2 += loss2.item()
        total_task1_acc += task1_acc
        total_task2_acc += task2_acc

    # Losses are averaged per batch, accuracies per sample.
    avg_loss1 = total_loss1 / len(train_loader)
    avg_loss2 = total_loss2 / len(train_loader)
    avg_acc1 = total_task1_acc / count
    avg_acc2 = total_task2_acc / count

    return {
        'main_loss': avg_loss1,
        'aux_loss': avg_loss2,
        'main_acc': avg_acc1,
        'aux_acc': avg_acc2,
        'samples': count
    }

def validate(model, loader, device, criterion, task='main'):
    """Evaluate the model on one task head over a loader.

    Args:
        model: network whose forward returns (main_logits, aux_logits, _, _).
        loader: yields (images, labels) with a single target per sample.
        device: torch device tensors are moved to.
        criterion: classification loss applied to the selected head's logits.
        task: 'main' selects the first output head; anything else the second.

    Returns:
        (avg_loss, avg_acc): batch-averaged loss and sample-averaged accuracy.
    """
    model.eval()
    total_loss = 0.0
    total_acc = 0
    count = 0

    with torch.no_grad():
        for images, labels in loader:
            # Model expects NHWC, single-channel 256x256 input.
            images = images.view(-1, 256, 256, 1).to(device)
            # Flatten targets to 1-D once. Unlike the old view(-1, 1) +
            # squeeze (which was also duplicated further down), this keeps a
            # batch of one as shape [1] instead of a 0-d scalar, so the loss
            # and comparison below always see a proper batch dimension.
            labels = labels.view(-1).to(device)

            count += images.shape[0]
            main_output, aux_output, _, _ = model(images)

            # Pick the requested task head.
            output = main_output if task == 'main' else aux_output

            loss = criterion(output, labels)
            pred = torch.max(output, dim=1)[1]
            total_loss += loss.item()
            total_acc += torch.eq(pred, labels).sum().item()

    avg_loss = total_loss / len(loader)
    avg_acc = total_acc / count
    return avg_loss, avg_acc

def train(
    model,
    train_loader,
    valid_loader,
    test_loader,
    optimizer,
    criterion,
    device,
    EPOCHS,
    save_interval=5,
    lr_adjust_epochs=(80, 120),
    gamma=0.1,
    early_stop_patience=25,
    early_stop_threshold=0.85,
    log_dir='runs',
    checkpoint_path=None
):
    """Train `model` for up to EPOCHS epochs, validating and testing each epoch.

    Checkpoints are written every `save_interval` epochs and whenever the
    validation accuracy improves; the best weights are restored and saved at
    the end. Early stopping triggers once validation accuracy has reached
    `early_stop_threshold` and then fails to improve for
    `early_stop_patience` consecutive epochs.

    Args:
        model, train_loader, valid_loader, test_loader, optimizer, criterion,
        device: as in train_one_epoch / validate.
        EPOCHS: maximum number of epochs.
        save_interval: epoch interval for periodic checkpoints.
        lr_adjust_epochs: epochs at which the learning rate is step-decayed
            by `gamma` (default is a tuple to avoid a mutable default arg).
        gamma: LR decay factor per milestone.
        early_stop_patience / early_stop_threshold: early-stopping controls.
        log_dir: TensorBoard log directory.
        checkpoint_path: optional state_dict file to resume from.

    Returns:
        (model, metrics): the model loaded with its best weights and a dict
        of per-epoch metric histories.
    """
    writer = SummaryWriter(log_dir=log_dir)
    global_step = 0
    best_valid_acc = 0.0
    best_model_state = None
    early_stop_counter = 0
    converged = False
    model.to(device)

    # Optionally resume; map_location keeps this working when the checkpoint
    # was saved on a different device than the one we train on now.
    if checkpoint_path:
        print(f"Loading model from {checkpoint_path}")
        model.load_state_dict(torch.load(checkpoint_path, map_location=device))

    # Per-epoch metric histories.
    metrics = {
        'train_main_loss': [], 'train_aux_loss': [],
        'train_main_acc': [], 'train_aux_acc': [],
        'valid_main_loss': [], 'valid_main_acc': [],
        'test_main_loss': [], 'test_main_acc': []
    }

    # Main training loop
    for epoch in range(1, EPOCHS + 1):
        if converged:
            print(f"Early stopping at epoch {epoch}")
            break

        # Train for one epoch
        print("train!")
        train_metrics = train_one_epoch(
            model, train_loader, optimizer, criterion, device
        )

        # Track total batches processed (one optimizer step per batch).
        global_step += len(train_loader)

        # Validate on main task
        print("valid!")
        valid_loss, valid_acc = validate(
            model, valid_loader, device, criterion, task='main'
        )

        # Test on main task
        print("test!")
        test_loss, test_acc = validate(
            model, test_loader, device, criterion, task='main'
        )

        # Record metrics
        metrics['train_main_loss'].append(train_metrics['main_loss'])
        metrics['train_aux_loss'].append(train_metrics['aux_loss'])
        metrics['train_main_acc'].append(train_metrics['main_acc'])
        metrics['train_aux_acc'].append(train_metrics['aux_acc'])
        metrics['valid_main_loss'].append(valid_loss)
        metrics['valid_main_acc'].append(valid_acc)
        metrics['test_main_loss'].append(test_loss)
        metrics['test_main_acc'].append(test_acc)

        # Tensorboard logging
        writer.add_scalar('Loss/train_main', train_metrics['main_loss'], epoch)
        writer.add_scalar('Loss/train_aux', train_metrics['aux_loss'], epoch)
        writer.add_scalar('Loss/valid', valid_loss, epoch)
        writer.add_scalar('Loss/test', test_loss, epoch)
        writer.add_scalar('Accuracy/train_main', train_metrics['main_acc'], epoch)
        writer.add_scalar('Accuracy/valid', valid_acc, epoch)
        writer.add_scalar('Accuracy/test', test_acc, epoch)

        # Print epoch summary
        print(f"\nEpoch {epoch}/{EPOCHS}:")
        print(f"  Train Main - Loss: {train_metrics['main_loss']:.6f}, Acc: {train_metrics['main_acc']:.4f}")
        print(f"  Valid Main - Loss: {valid_loss:.6f}, Acc: {valid_acc:.4f}")
        print(f"  Test  Main - Loss: {test_loss:.6f}, Acc: {test_acc:.4f}")

        # Save best model and reset the no-improvement counter.
        if valid_acc > best_valid_acc:
            best_valid_acc = valid_acc
            best_model_state = copy.deepcopy(model.state_dict())
            early_stop_counter = 0
            torch.save(model.state_dict(), f'best_model_epoch{epoch}_acc{valid_acc:.4f}.pth')
        else:
            early_stop_counter += 1

        # Save checkpoint periodically
        if epoch % save_interval == 0:
            torch.save(model.state_dict(), f'checkpoint_epoch{epoch}.pth')

        # Step-decay the learning rate at each milestone. The exponent is the
        # number of milestones reached so far, NOT the epoch number: passing
        # the raw epoch (as before) computed gamma**80 at epoch 80 and
        # collapsed the learning rate to effectively zero.
        if epoch in lr_adjust_epochs:
            milestones_passed = sum(1 for m in lr_adjust_epochs if m <= epoch)
            optimizer = adjust_learning_rate(optimizer, gamma, milestones_passed)
            print(f"Learning rate adjusted to {optimizer.param_groups[0]['lr']:.2e}")

        # Check convergence (early stopping): only count patience once the
        # accuracy threshold has been reached.
        if valid_acc >= early_stop_threshold:
            if early_stop_counter >= early_stop_patience:
                converged = True
                print("Early stopping triggered")
        else:
            early_stop_counter = 0  # Reset counter if below threshold

        # Periodic visualization. plot_metrics takes the individual metric
        # lists (the old code passed the whole dict, which raised TypeError).
        if epoch % 10 == 0:
            plot_metrics(
                metrics['train_main_loss'],
                metrics['train_aux_loss'],
                metrics['train_main_acc'],
                metrics['train_aux_acc'],
                metrics['test_main_acc'],
            )
            # visualize_tsne(model, valid_loader, epoch)
            # generate_heatmap(model, next(iter(train_loader))[0][0], epoch, device)

    # Restore and save the best model after training.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        torch.save(model.state_dict(), f'final_best_model_acc{best_valid_acc:.4f}.pth')
        print(f"Saved best model with accuracy {best_valid_acc:.4f}")

    writer.close()
    return model, metrics

# Helper functions: plotting and visualization utilities (plot_metrics, generate_heatmap, ...)
def plot_metrics(train_main_loss, train_aux_loss,
                train_main_acc, train_aux_acc,
                test_main_acc):
    """Plot training loss/accuracy and test accuracy curves to a PNG file.

    Saves the figure to ./training_metrics.png and closes it.

    Args:
        train_main_loss: per-epoch main-task training losses.
        train_aux_loss: per-epoch auxiliary-task training losses.
        train_main_acc: per-epoch main-task training accuracies.
        train_aux_acc: per-epoch auxiliary-task training accuracies.
        test_main_acc: per-epoch main-task test accuracies.
    """
    plt.figure(figsize=(15,10))

    # Training loss curves
    plt.subplot(2,2,1)
    plt.plot(train_main_loss, label='Main Task Loss')
    plt.plot(train_aux_loss, label='Aux Task Loss')
    plt.title('Training Loss')
    plt.legend()

    # Training accuracy curves
    plt.subplot(2,2,2)
    plt.plot(train_main_acc, label='Main Task Acc')
    plt.plot(train_aux_acc, label='Aux Task Acc')
    plt.title('Training Accuracy')
    plt.legend()

    # Test accuracy curve (this argument was previously accepted but
    # never plotted).
    plt.subplot(2,2,3)
    plt.plot(test_main_acc, label='Test Main Acc')
    plt.title('Test Accuracy')
    plt.legend()

    plt.tight_layout()
    plt.savefig('./training_metrics.png')
    plt.close()
def generate_heatmap(model, sample_data, epoch, device):
    """Save a 3-panel figure (input image, layer-8 activation map, overlay)
    to ./layer8_heatmap_epoch{epoch}.png.

    Args:
        model: network whose forward returns (_, _, _, orig_img) and which
            exposes model.activation['layer8'] — presumably populated by a
            forward hook registered elsewhere; TODO confirm.
        sample_data: one image tensor (3-D) or a batch (4-D); only the first
            sample of a batch is visualized.
        epoch: epoch number, used only in the output filename.
        device: torch device the forward pass runs on.
    """
    model.eval()
    # Ensure the input has a batch dimension (NHWC expected downstream).
    if sample_data.dim() == 3:
        sample_data = sample_data.unsqueeze(0)  # add batch dimension
    sample_data = sample_data.to(device)
    
    with torch.no_grad():
        _, _, _, orig_img = model(sample_data)  # fourth output: original image tensor
    
    # Layer-8 activation; expected to be 4-D: [batch_size, C, H, W].
    activation = model.activation['layer8']
    
    # Process the activation map, accounting for the batch dimension.
    print(f"Activation shape (before processing): {activation.shape}")
    
    # Pick one sample from the batch to visualize (first by default).
    selected_index = 0  # first sample drives the heatmap
    activation = activation[selected_index]  # shape becomes [C, H, W]
    
    # Heatmap = channel-wise mean of the activation.
    heatmap = torch.mean(activation, dim=0).cpu().numpy()  # shape [H, W]
    
    # Clamp negatives and min-max normalize to [0, 1] (epsilon guards
    # against a constant map dividing by zero).
    heatmap = np.maximum(heatmap, 0)
    heatmap_min = heatmap.min()
    heatmap_max = heatmap.max()
    heatmap = (heatmap - heatmap_min) / (heatmap_max - heatmap_min + 1e-8)
    
    print(f"Heatmap shape: {heatmap.shape}")
    
    # Process the original image — same sample as the heatmap.
    # orig_img from the model is assumed to be [batch_size, C, H, W]
    # (TODO confirm against the model's forward).
    img = orig_img[selected_index].squeeze().cpu().numpy()  # drop batch/singleton dims
    
    # Convert channel order (CHW -> HWC) for display, using the heuristic
    # that the channel axis is the smallest leading dimension.
    if img.ndim == 3 and img.shape[0] < img.shape[1] and img.shape[0] < img.shape[2]:
        img = np.transpose(img, (1, 2, 0))
    
    # Handle multi-channel images.
    if img.ndim == 3:
        if img.shape[-1] in [1, 3, 4]:  # grayscale, RGB, or RGBA
            # Collapse a single-channel image to 2-D.
            if img.shape[-1] == 1:
                img = img[..., 0]
        else:
            # Unconventional channel count: keep only the first channel.
            img = img[..., 0]
    
    # Min-max normalize the image to [0, 1].
    img_min = img.min()
    img_max = img.max()
    img = (img - img_min) / (img_max - img_min + 1e-8)
    
    # The heatmap must be 2-D (H x W) at this point.
    if heatmap.ndim != 2:
        raise ValueError(f"Heatmap should be 2-dimensional (H, W), got {heatmap.ndim} dimensions")
    
    # Original image dimensions, used to resize the heatmap for the overlay.
    img_height, img_width = img.shape[:2]
    
    # Debug info.
    print(f"Processed image shape: {img.shape}")
    
    # Build the 3-panel visualization.
    plt.figure(figsize=(15, 6), dpi=100)
    
    # Panel 1: original image.
    plt.subplot(1, 3, 1)
    plt.imshow(img, cmap='gray')
    plt.title('Original Image')
    plt.axis('off')
    
    # Panel 2: activation heatmap.
    plt.subplot(1, 3, 2)
    plt.imshow(heatmap, cmap='viridis')
    plt.title('Layer8 Activation Map')
    plt.colorbar(fraction=0.046, pad=0.04)
    plt.axis('off')
    
    # Panel 3: heatmap overlaid on the original image.
    plt.subplot(1, 3, 3)
    plt.imshow(img, cmap='gray')
    
    # Resize the heatmap to match the original image dimensions.
    resized_heatmap = cv2.resize(heatmap, (img_width, img_height))
    
    plt.imshow(resized_heatmap, 
               alpha=0.5, 
               cmap='jet',
               interpolation='bicubic')
    
    plt.title('Overlay Visualization')
    plt.axis('off')
    
    # Save the figure.
    plt.savefig(f'./layer8_heatmap_epoch{epoch}.png',
               bbox_inches='tight',
               pad_inches=0.1)
    plt.close()