from net import MobileNetV2Classifier
from aff_dataset import AffectDataSet2
from trans import get_train_transform, get_val_transform
import torch
from torch.utils.data import DataLoader
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch.nn as nn
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import torchvision
from torchvision.utils import make_grid
import os
import csv
from datetime import datetime

# Hyperparameters / run configuration
data_dir = "/home/tunm/datasets/affectnet"
epochs = 100
batch_size = 256
learning_rate = 1e-3
num_classes = 7

# Select compute device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Per-run output directory layout: runs/exp_<timestamp>/{checkpoints,visualizations,logs}
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
run_dir = f"runs/exp_{timestamp}"
checkpoints_dir = os.path.join(run_dir, "checkpoints")
visualizations_dir = os.path.join(run_dir, "visualizations")
logs_dir = os.path.join(run_dir, "logs")

# Create all required directories
os.makedirs(checkpoints_dir, exist_ok=True)
os.makedirs(visualizations_dir, exist_ok=True)
os.makedirs(logs_dir, exist_ok=True)

print(f"Training files will be saved to: {run_dir}")

# Data transforms
train_transform = get_train_transform()
val_transform = get_val_transform()

# Datasets (AffectNet, 7-class)
train_dataset = AffectDataSet2(data_path=data_dir, affcls=7, phase='train', transform=train_transform)
val_dataset = AffectDataSet2(data_path=data_dir, affcls=7, phase='valid', transform=val_transform)

# Data loaders
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4)

# Model: MobileNetV2 at half width for a lightweight classifier
model = MobileNetV2Classifier(num_classes=num_classes, width_mult=0.5).to(device)

# Loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=learning_rate)

# LR scheduler - multiply lr by 0.65 when val loss has not improved for 10 epochs.
# NOTE(review): the `verbose` kwarg was deprecated in torch 2.2 and later removed — confirm the installed torch version supports it.
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.65, patience=10, verbose=True)

# Best-so-far validation metrics
best_val_loss = float('inf')
best_val_acc = 0.0

# Emotion class names — assumed to match the dataset's label indices 0..6; TODO confirm against AffectDataSet2
emotion_labels = ['Neutral', 'Happy', 'Sad', 'Surprise', 'Fear', 'Disgust', 'Anger']

# Output file paths
csv_file = os.path.join(logs_dir, 'training_metrics.csv')
log_file = os.path.join(logs_dir, 'training_log.txt')
config_file = os.path.join(run_dir, 'config.txt')

# Persist the training configuration for reproducibility
with open(config_file, 'w') as f:
    f.write(f"Training Configuration\n")
    f.write(f"=====================\n")
    f.write(f"Timestamp: {timestamp}\n")
    f.write(f"Data Directory: {data_dir}\n")
    f.write(f"Device: {device}\n")
    f.write(f"Epochs: {epochs}\n")
    f.write(f"Batch Size: {batch_size}\n")
    f.write(f"Learning Rate: {learning_rate}\n")
    f.write(f"Number of Classes: {num_classes}\n")
    f.write(f"Model: {model.__class__.__name__}\n")
    f.write(f"Width Multiplier: 0.5\n")
    f.write(f"Optimizer: Adam\n")
    f.write(f"Scheduler: ReduceLROnPlateau (patience=10, factor=0.65)\n")
    f.write(f"Loss Function: CrossEntropyLoss\n")

# Initialize the metrics CSV with a header row (appended to once per epoch)
with open(csv_file, 'w', newline='') as f:
    csv_writer = csv.writer(f)
    csv_writer.writerow(['Epoch', 'Train Loss', 'Train Acc', 'Val Loss', 'Val Acc', 'LR'])

# Training history (also embedded in every checkpoint)
train_losses = []
train_accs = []
val_losses = []
val_accs = []

# Create the plain-text training log
with open(log_file, 'w') as f:
    f.write(f"Training Log - Started at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
    f.write(f"Run Directory: {run_dir}\n")
    f.write(f"Model: {model.__class__.__name__}\n")
    f.write(f"Device: {device}\n")
    f.write(f"Epochs: {epochs}\n")
    f.write(f"Batch Size: {batch_size}\n")
    f.write(f"Initial Learning Rate: {learning_rate}\n")
    f.write("="*80 + "\n\n")

def train_epoch(model, dataloader, criterion, optimizer, device):
    """Run one optimization pass over `dataloader`.

    Returns:
        (avg_loss, accuracy_pct): mean per-batch loss and top-1 accuracy in %.
    """
    model.train()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0

    progress = tqdm(dataloader, desc='Training')
    for step, (inputs, targets) in enumerate(progress):
        inputs = inputs.to(device)
        targets = targets.to(device)

        # standard SGD step: zero grads -> forward -> loss -> backward -> update
        optimizer.zero_grad()
        logits = model(inputs)
        batch_loss = criterion(logits, targets)
        batch_loss.backward()
        optimizer.step()

        loss_sum += batch_loss.item()
        _, guesses = torch.max(logits.data, 1)
        n_seen += targets.size(0)
        n_correct += (guesses == targets).sum().item()

        # live progress-bar readout of running averages
        progress.set_postfix({
            'Loss': f'{loss_sum/(step+1):.4f}',
            'Acc': f'{100.*n_correct/n_seen:.2f}%'
        })

    return loss_sum / len(dataloader), 100. * n_correct / n_seen

def validate_epoch(model, dataloader, criterion, device):
    """Evaluate the model on `dataloader` without gradient tracking.

    Returns:
        (avg_loss, accuracy_pct, sample_imgs, sample_true, sample_pred)
        where the last three are up to 25 CPU tensors kept for visualization.
    """
    model.eval()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0

    # pool of up to 25 samples for the prediction grid
    sample_imgs = []
    sample_true = []
    sample_pred = []

    with torch.no_grad():
        progress = tqdm(dataloader, desc='Validation')
        for step, (inputs, targets) in enumerate(progress):
            inputs, targets = inputs.to(device), targets.to(device)

            logits = model(inputs)
            loss_sum += criterion(logits, targets).item()

            _, guesses = torch.max(logits.data, 1)
            n_seen += targets.size(0)
            n_correct += (guesses == targets).sum().item()

            # top up the visualization pool until it holds 25 samples
            # (tensor slicing clamps automatically when the batch is short)
            room = 25 - len(sample_imgs)
            if room > 0:
                sample_imgs.extend(inputs.cpu()[:room])
                sample_true.extend(targets.cpu()[:room])
                sample_pred.extend(guesses.cpu()[:room])

            # live progress-bar readout of running averages
            progress.set_postfix({
                'Loss': f'{loss_sum/(step+1):.4f}',
                'Acc': f'{100.*n_correct/n_seen:.2f}%'
            })

    return (loss_sum / len(dataloader), 100. * n_correct / n_seen,
            sample_imgs[:25], sample_true[:25], sample_pred[:25])

def create_visualization_grid(images, labels, predictions, epoch):
    """Save a 5x5 grid of validation samples annotated with true/predicted emotions.

    Writes predictions_epoch_NNN.png into the run's visualizations directory.
    Assumes images are CHW tensors in BGR channel order — TODO confirm upstream transform.
    """
    fig, axes = plt.subplots(5, 5, figsize=(15, 15))
    axes = axes.ravel()

    n_shown = min(25, len(images))
    for pos in range(n_shown):
        sample = images[pos]
        # grayscale -> pseudo-RGB by channel replication
        if sample.shape[0] == 1:
            sample = sample.repeat(3, 1, 1)

        # scale to 0-255, clamp, then back to 0-1 for imshow
        sample = sample * 255
        sample = torch.clamp(sample, 0, 255) / 255

        # CHW tensor -> HWC numpy, then BGR -> RGB channel swap
        arr = sample.permute(1, 2, 0).numpy()
        arr = arr[..., ::-1]

        cell = axes[pos]
        cell.imshow(arr)
        cell.set_title(f'True: {emotion_labels[labels[pos]]}\nPred: {emotion_labels[predictions[pos]]}',
                       fontsize=10)
        cell.axis('off')

    # blank out any unused cells when fewer than 25 samples arrived
    for pos in range(len(images), 25):
        axes[pos].axis('off')

    plt.suptitle(f'Validation Predictions - Epoch {epoch}', fontsize=16)
    plt.tight_layout()

    plt.savefig(os.path.join(visualizations_dir, f'predictions_epoch_{epoch:03d}.png'),
                dpi=100, bbox_inches='tight')
    plt.close()

def plot_training_curves(train_losses, train_accs, val_losses, val_accs, epoch):
    """Render side-by-side loss and accuracy curves and save as a PNG.

    Writes training_curves_epoch_NNN.png into the run's visualizations directory.
    """
    fig, (loss_ax, acc_ax) = plt.subplots(1, 2, figsize=(12, 4))

    # left panel: loss curves (epochs are 1-indexed on the x-axis)
    loss_ax.plot(range(1, len(train_losses) + 1), train_losses, 'b-', label='Train Loss', linewidth=2)
    loss_ax.plot(range(1, len(val_losses) + 1), val_losses, 'r-', label='Val Loss', linewidth=2)
    loss_ax.set_xlabel('Epoch')
    loss_ax.set_ylabel('Loss')
    loss_ax.set_title('Training and Validation Loss')
    loss_ax.legend()
    loss_ax.grid(True, alpha=0.3)

    # right panel: accuracy curves
    acc_ax.plot(range(1, len(train_accs) + 1), train_accs, 'b-', label='Train Acc', linewidth=2)
    acc_ax.plot(range(1, len(val_accs) + 1), val_accs, 'r-', label='Val Acc', linewidth=2)
    acc_ax.set_xlabel('Epoch')
    acc_ax.set_ylabel('Accuracy (%)')
    acc_ax.set_title('Training and Validation Accuracy')
    acc_ax.legend()
    acc_ax.grid(True, alpha=0.3)

    plt.tight_layout()

    plt.savefig(os.path.join(visualizations_dir, f'training_curves_epoch_{epoch:03d}.png'),
                dpi=100, bbox_inches='tight')
    plt.close()

# Main training loop
print("Starting training...")
print(f"Files will be saved to: {run_dir}")

for epoch in range(1, epochs + 1):
    print(f"\nEpoch {epoch}/{epochs}")
    print("-" * 50)
    
    # Train
    train_loss, train_acc = train_epoch(model, train_loader, criterion, optimizer, device)
    
    # Validate (also collects sample images/labels/predictions for visualization)
    val_loss, val_acc, vis_images, vis_labels, vis_preds = validate_epoch(
        model, val_loader, criterion, device
    )
    
    # Record history
    train_losses.append(train_loss)
    train_accs.append(train_acc)
    val_losses.append(val_loss)
    val_accs.append(val_acc)
    
    # LR scheduling keyed on validation loss
    scheduler.step(val_loss)
    current_lr = optimizer.param_groups[0]['lr']
    
    # Console summary
    print(f"Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.2f}%")
    print(f"Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.2f}%")
    print(f"Learning Rate: {current_lr:.6f}")
    
    # Append to the text log
    with open(log_file, 'a') as f:
        f.write(f"Epoch {epoch}/{epochs}\n")
        f.write(f"Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
        f.write(f"Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.2f}%\n")
        f.write(f"Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.2f}%\n")
        f.write(f"Learning Rate: {current_lr:.6f}\n")
        if val_loss < best_val_loss:
            f.write(f"*** New Best Model! ***\n")
        f.write("-"*50 + "\n\n")
    
    # Append metrics row to CSV
    with open(csv_file, 'a', newline='') as f:
        csv_writer = csv.writer(f)
        csv_writer.writerow([epoch, train_loss, train_acc, val_loss, val_acc, current_lr])
    
    # Create visualizations. NOTE(review): `epoch % 1 == 0` is true every epoch;
    # the original comment claimed "every 5 epochs" — change to `% 5` if that was the intent.
    if epoch % 1 == 0 or epoch == epochs:
        if len(vis_images) > 0:
            create_visualization_grid(vis_images, vis_labels, vis_preds, epoch)
        
        # Plot training curves
        plot_training_curves(train_losses, train_accs, val_losses, val_accs, epoch)
    
    # Save best model (by validation loss)
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        best_val_acc = val_acc
        
        best_model_path = os.path.join(checkpoints_dir, 'best_model.pth')
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'scheduler_state_dict': scheduler.state_dict(),
            'best_val_loss': best_val_loss,
            'best_val_acc': best_val_acc,
            'train_losses': train_losses,
            'train_accs': train_accs,
            'val_losses': val_losses,
            'val_accs': val_accs,
        }, best_model_path)
        print(f"Saved best model with val_loss: {best_val_loss:.4f}, val_acc: {best_val_acc:.2f}%")
    
    # Periodic checkpoint every 20 epochs (resume-capable: includes optimizer/scheduler state)
    if epoch % 20 == 0:
        checkpoint_path = os.path.join(checkpoints_dir, f'checkpoint_epoch_{epoch:03d}.pth')
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'scheduler_state_dict': scheduler.state_dict(),
            'train_losses': train_losses,
            'train_accs': train_accs,
            'val_losses': val_losses,
            'val_accs': val_accs,
        }, checkpoint_path)

# Save the final model (last epoch's weights, regardless of best)
final_model_path = os.path.join(checkpoints_dir, 'final_model.pth')
torch.save({
    'epoch': epochs,
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
    'scheduler_state_dict': scheduler.state_dict(),
    'train_losses': train_losses,
    'train_accs': train_accs,
    'val_losses': val_losses,
    'val_accs': val_accs,
    'best_val_loss': best_val_loss,
    'best_val_acc': best_val_acc,
}, final_model_path)

# Final training-curve figure covering all epochs
plot_training_curves(train_losses, train_accs, val_losses, val_accs, epochs)

# Training finished — print summary tree (user-facing strings left as-is)
print("\nTraining completed!")
print(f"Best validation loss: {best_val_loss:.4f}")
print(f"Best validation accuracy: {best_val_acc:.2f}%")
print(f"\nAll files saved to: {run_dir}")
print(f"├── config.txt                 # 训练配置")
print(f"├── logs/")
print(f"│   ├── training_metrics.csv   # CSV格式的训练指标")
print(f"│   └── training_log.txt       # 详细训练日志")
print(f"├── checkpoints/")
print(f"│   ├── best_model.pth         # 最佳模型")
print(f"│   ├── final_model.pth        # 最终模型")
print(f"│   └── checkpoint_epoch_*.pth # 定期检查点")
print(f"└── visualizations/")
print(f"    ├── predictions_epoch_*.png # 预测结果可视化")
print(f"    └── training_curves_*.png   # 训练曲线")

# Write final summary to the log file
with open(log_file, 'a') as f:
    f.write("\n" + "="*80 + "\n")
    f.write(f"Training Completed at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
    f.write(f"Total Epochs: {epochs}\n")
    f.write(f"Best Validation Loss: {best_val_loss:.4f}\n")
    f.write(f"Best Validation Accuracy: {best_val_acc:.2f}%\n")
    f.write(f"Final Learning Rate: {optimizer.param_groups[0]['lr']:.6f}\n")
    f.write(f"All files saved to: {run_dir}\n")

# Generate a README for this run
readme_path = os.path.join(run_dir, 'README.md')
with open(readme_path, 'w') as f:
    f.write(f"# Training Run {timestamp}\n\n")
    f.write(f"## Configuration\n")
    f.write(f"- **Start Time**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
    f.write(f"- **Device**: {device}\n")
    f.write(f"- **Epochs**: {epochs}\n")
    f.write(f"- **Batch Size**: {batch_size}\n")
    f.write(f"- **Learning Rate**: {learning_rate}\n")
    f.write(f"- **Classes**: {num_classes} ({', '.join(emotion_labels)})\n\n")
    f.write(f"## Results\n")
    f.write(f"- **Best Validation Loss**: {best_val_loss:.4f}\n")
    f.write(f"- **Best Validation Accuracy**: {best_val_acc:.2f}%\n\n")
    f.write(f"## File Structure\n")
    f.write(f"```\n")
    f.write(f"{run_dir}/\n")
    f.write(f"├── README.md\n")
    f.write(f"├── config.txt\n")
    f.write(f"├── logs/\n")
    f.write(f"│   ├── training_metrics.csv\n")
    f.write(f"│   └── training_log.txt\n")
    f.write(f"├── checkpoints/\n")
    f.write(f"│   ├── best_model.pth\n")
    f.write(f"│   ├── final_model.pth\n")
    f.write(f"│   └── checkpoint_epoch_*.pth\n")
    f.write(f"└── visualizations/\n")
    f.write(f"    ├── predictions_epoch_*.png\n")
    f.write(f"    └── training_curves_*.png\n")
    f.write(f"```\n")

print(f"\nREADME created: {readme_path}")