import torch
import torchvision
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR, ReduceLROnPlateau
import json
import sys
import os
from multiprocessing import freeze_support
import logging
from torch.nn.utils import clip_grad_norm_

# Add the project root directory to the module search path so that
# `models.network` resolves when this script is run directly.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from models.network import FontModel  # import the model definition

# Configure logging: append epoch metrics to training.log with timestamps.
logging.basicConfig(filename='training.log', level=logging.INFO, format='%(asctime)s - %(message)s')

def main():
    """Train FontModel on ImageFolder datasets under data/train and data/val.

    Pipeline: augmented training data, deterministic validation data,
    Adam + cosine-annealing and plateau LR scheduling, gradient clipping,
    early stopping on validation loss, and periodic/best checkpointing.
    """
    # Training-time preprocessing with data augmentation.
    train_transform = transforms.Compose([
        transforms.RandomRotation(10),  # random rotation
        transforms.RandomResizedCrop(224),  # random crop + resize
        transforms.RandomHorizontalFlip(),  # random horizontal flip
        transforms.ColorJitter(brightness=0.2, contrast=0.2),  # color jitter
        transforms.ToTensor(),  # PIL.Image -> Tensor
        transforms.RandomErasing(p=0.5, scale=(0.02, 0.1), ratio=(0.3, 3.3)),  # random occlusion
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    # BUG FIX: validation must be deterministic. The original code reused the
    # augmented transform for the val set, which makes val loss/accuracy noisy
    # and corrupts both the ReduceLROnPlateau signal and early stopping.
    val_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    # Load datasets (class labels are inferred from sub-directory names).
    train_dataset = datasets.ImageFolder(root="data/train", transform=train_transform)
    val_dataset = datasets.ImageFolder(root="data/val", transform=val_transform)

    # Training device (GPU if available, else CPU) — chosen early so the
    # DataLoaders can enable pin_memory for faster host-to-device copies.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True,
                              num_workers=4, pin_memory=(device.type == "cuda"))
    val_loader = DataLoader(val_dataset, batch_size=16, shuffle=False,
                            num_workers=4, pin_memory=(device.type == "cuda"))

    # Persist the class ordering so inference code maps logits back to names.
    class_names = train_dataset.classes
    print("类别顺序:", class_names)

    with open("class_names.json", "w", encoding="utf-8") as f:
        json.dump(class_names, f, ensure_ascii=False, indent=4)
    print("类别顺序已保存到 class_names.json")

    # Initialize the model; the class count is inferred from the dataset.
    model = FontModel(num_classes=len(train_dataset.classes))
    model.to(device)

    # Loss and optimizer (weight_decay adds L2 regularization).
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-5)

    num_epochs = 10

    # LR schedulers.
    # NOTE(review): stepping BOTH a cosine scheduler and ReduceLROnPlateau on
    # the same optimizer compounds their effects; kept as-is to preserve the
    # original training behavior, but consider using only one of them.
    scheduler_cosine = CosineAnnealingLR(optimizer, T_max=num_epochs)
    scheduler_plateau = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=2)

    # BUG FIX: torch.save raises if the target directory is missing.
    os.makedirs("models", exist_ok=True)

    # Early-stopping configuration.
    best_val_loss = float('inf')
    patience = 3  # number of consecutive non-improving epochs tolerated
    epochs_without_improvement = 0

    # Training loop.
    for epoch in range(num_epochs):
        model.train()
        running_loss = 0.0
        for images, labels in train_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            loss = criterion(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            clip_grad_norm_(model.parameters(), max_norm=1.0)  # gradient clipping
            optimizer.step()
            running_loss += loss.item()

        # Mean training loss over batches.
        train_loss = running_loss / len(train_loader)

        # Validation pass (no gradients, eval mode disables dropout/BN updates).
        model.eval()
        val_loss = 0.0
        correct = 0
        total = 0
        with torch.no_grad():
            for images, labels in val_loader:
                images, labels = images.to(device), labels.to(device)
                outputs = model(images)
                loss = criterion(outputs, labels)
                val_loss += loss.item()
                _, predicted = torch.max(outputs, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        val_loss /= len(val_loader)
        val_accuracy = 100 * correct / total

        print(f"Epoch [{epoch+1}/{num_epochs}], Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}, Val Acc: {val_accuracy:.2f}%")
        logging.info(f"Epoch [{epoch+1}/{num_epochs}], Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}, Val Acc: {val_accuracy:.2f}%")

        # LR scheduling (see NOTE above about the double-scheduler setup).
        scheduler_cosine.step()
        scheduler_plateau.step(val_loss)

        # Early stopping: track best validation loss, checkpoint on improvement.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            epochs_without_improvement = 0
            torch.save(model.state_dict(), "models/best_font_model.pth")
            print(f"Epoch [{epoch+1}/{num_epochs}], 保存最佳模型")
        else:
            epochs_without_improvement += 1
            if epochs_without_improvement >= patience:
                print("早停：验证 Loss 不再下降")
                break

        # Periodic checkpoint every 2 epochs.
        if (epoch + 1) % 2 == 0:
            torch.save(model.state_dict(), f"models/font_model_epoch{epoch+1}.pth")

    # Save the final model weights (last epoch reached, best or not).
    torch.save(model.state_dict(), "models/font_model.pth")
    print("训练完成，模型权重已保存到 models/font_model.pth")

if __name__ == '__main__':
    freeze_support()  # required for DataLoader worker processes on Windows
    main()