from pathlib import Path

import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
from timm import create_model
import matplotlib.pyplot as plt
import numpy as np
# Seed every RNG in use so experiment runs are reproducible.
SEED = 42
torch.manual_seed(SEED)
np.random.seed(SEED)

# Pick the compute device: CUDA GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# -----------------------------------
# 1. Data preprocessing and loading
# -----------------------------------
# ImageNet channel statistics — must match the pretrained backbone.
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STD = [0.229, 0.224, 0.225]

# Training pipeline: random-resized crop and horizontal flip for
# augmentation, then tensor conversion and ImageNet normalization.
train_transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=_IMAGENET_MEAN, std=_IMAGENET_STD),
])

# Validation pipeline: deterministic resize + center crop only — no
# augmentation, so evaluation is repeatable.
val_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=_IMAGENET_MEAN, std=_IMAGENET_STD),
])

# Datasets follow the ImageFolder layout: <root>/<class_name>/<image>.
train_dataset = datasets.ImageFolder(root="/home/featurize/work/melon17_full/train", transform=train_transform)
val_dataset = datasets.ImageFolder(root="/home/featurize/work/melon17_full/val", transform=val_transform)

# Loaders: shuffle only the training split; 4 worker subprocesses each,
# 32 samples per batch.
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, num_workers=4)
val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False, num_workers=4)

# Class names are inferred from the subdirectory names.
class_names = train_dataset.classes
print(f"Classes: {class_names}")

# -----------------------------------
# 2. Model: EfficientNet-B0 with ImageNet-pretrained weights and the
#    classifier head resized to this dataset's class count.
# -----------------------------------
model = create_model(
    "efficientnet_b0",
    pretrained=True,
    num_classes=len(class_names),
).to(device)

# -----------------------------------
# 3. Loss, optimizer and learning-rate schedule
# -----------------------------------
criterion = nn.CrossEntropyLoss()
optimizer = optim.AdamW(model.parameters(), lr=1e-4)

# ReduceLROnPlateau: when the monitored quantity (validation loss here)
# fails to improve for `patience` epochs, the LR is multiplied by `factor`.
# Helps the model escape plateaus and fine-tune for better generalization.
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=3)

# Remember the starting LR so LR reductions can be reported during training.
prev_lr = optimizer.param_groups[0]['lr']

# Alternative schedulers, kept here for reference:
# - StepLR: multiply the LR by `gamma` every `step_size` epochs. Good for a
#   coarse "learn fast early, refine late" schedule.
#   scheduler = StepLR(optimizer, step_size=10, gamma=0.1)
# - CosineAnnealingLR: LR follows a cosine curve over `T_max` epochs; the
#   periodic decay can help escape local minima on longer runs.
#   scheduler = CosineAnnealingLR(optimizer, T_max=20)
# - Warm-up (e.g. via LambdaLR): ramp the LR up from a tiny value over the
#   first steps; stabilizes fine-tuning of pretrained weights by avoiding
#   large early parameter updates.
#   scheduler = LambdaLR(optimizer, lr_lambda=warmup_lr_scheduler)


# -----------------------------------
# 4. 训练与验证循环
# -----------------------------------
def train_one_epoch(model, loader, criterion, optimizer):
    """Run a single training pass over `loader`.

    Returns:
        Tuple of (mean per-batch loss, accuracy in percent) for the epoch.
    """
    model.train()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0

    for batch_images, batch_labels in loader:
        batch_images = batch_images.to(device)
        batch_labels = batch_labels.to(device)

        # Standard optimization step: forward, loss, backward, update.
        optimizer.zero_grad()
        logits = model(batch_images)
        batch_loss = criterion(logits, batch_labels)
        batch_loss.backward()
        optimizer.step()

        # Accumulate running statistics for reporting.
        loss_sum += batch_loss.item()
        preds = logits.argmax(dim=1)
        n_seen += batch_labels.size(0)
        n_correct += (preds == batch_labels).sum().item()

    return loss_sum / len(loader), 100 * n_correct / n_seen
# Dependencies: pip install torch torchvision timm matplotlib numpy

def validate(model, loader, criterion):
    """Evaluate `model` on `loader` without updating any weights.

    Returns:
        Tuple of (mean per-batch loss, accuracy in percent) over the set.
    """
    model.eval()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0

    # No parameters are updated during validation, so gradient tracking is
    # disabled to cut memory use and speed up the forward passes.
    with torch.no_grad():
        for batch_images, batch_labels in loader:
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)

            logits = model(batch_images)
            # Accumulate the scalar loss for this batch.
            loss_sum += criterion(logits, batch_labels).item()

            # Predicted class = index of the largest logit.
            preds = logits.argmax(dim=1)
            n_seen += batch_labels.size(0)
            n_correct += (preds == batch_labels).sum().item()

    return loss_sum / len(loader), 100 * n_correct / n_seen


class ModelCheckpoint:
    """Saves the best model weights and tracks early stopping.

    Args:
        save_dir: directory the checkpoint file is written to.
        model_name: checkpoint file name (without extension).
        monitor: label of the tracked metric ('val_loss' or 'val_acc'),
            used only in log messages.
        mode: 'max' if larger metric values are better, 'min' otherwise.
        patience: consecutive non-improving epochs tolerated before
            `early_stop` is set.
    """

    def __init__(self, save_dir='checkpoints', model_name='best_model',
                 monitor='val_acc', mode='max', patience=5):
        self.save_path = Path(save_dir) / f"{model_name}.pth"
        self.monitor = monitor
        self.mode = mode
        self.patience = patience
        self.counter = 0
        # Start from the worst possible score so the first epoch always wins.
        self.best_score = -np.inf if mode == 'max' else np.inf
        self.early_stop = False

        # Ensure the target directory exists up front.
        Path(save_dir).mkdir(parents=True, exist_ok=True)

    def save_model(self, model, epoch):
        """Persist the model's state dict to `self.save_path`."""
        torch.save(model.state_dict(), self.save_path)
        print(f"Model saved to {self.save_path} at epoch {epoch}")

    def __call__(self, current_val, model, epoch):
        """Record one epoch's metric; save on improvement.

        Sets `self.early_stop` once `patience` consecutive epochs pass
        without improvement.
        """
        if self.mode == 'max':
            improved = current_val > self.best_score
        else:
            improved = current_val < self.best_score

        if improved:
            # New best: report, remember, checkpoint, and reset the counter.
            print(f"{self.monitor} improved from {self.best_score:.4f} to {current_val:.4f}")
            self.best_score = current_val
            self.save_model(model, epoch)
            self.counter = 0
            return

        self.counter += 1
        print(f"{self.monitor} did not improve. Patience {self.counter}/{self.patience}")
        # Stop once the patience budget is exhausted.
        if self.counter >= self.patience:
            self.early_stop = True


# -----------------------------------
# 5. Main training loop
# -----------------------------------
# Everything below is placed under the __main__ guard so that importing this
# module — which DataLoader worker subprocesses do on platforms using the
# 'spawn' start method (Windows/macOS) — does not re-run the training loop.
# Previously only a print statement was guarded and the loop ran at import
# time, defeating the guard.
if __name__ == "__main__":
    print(f"Using device: {device}")

    num_epochs = 100
    train_losses, val_losses = [], []
    train_accs, val_accs = [], []

    # Checkpoint callback: keeps the weights with the best validation
    # accuracy and requests early stopping after 10 stagnant epochs.
    checkpoint = ModelCheckpoint(
        save_dir='./',
        model_name='efficientnet_b0_best',
        monitor='val_acc',
        mode='max',
        patience=10
    )

    for epoch in range(num_epochs):
        train_loss, train_acc = train_one_epoch(model, train_loader, criterion, optimizer)
        val_loss, val_acc = validate(model, val_loader, criterion)

        # Let the scheduler react to the validation loss.
        scheduler.step(val_loss)
        current_lr = optimizer.param_groups[0]['lr']

        # Report learning-rate reductions made by the scheduler.
        if current_lr != prev_lr:
            print(f'Epoch {epoch + 1}: reducing learning rate of group 0 to {current_lr:.4e}.')
            prev_lr = current_lr

        train_losses.append(train_loss)
        val_losses.append(val_loss)
        train_accs.append(train_acc)
        val_accs.append(val_acc)

        print(f"Epoch [{epoch + 1}/{num_epochs}] | "
              f"Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.2f}% | "
              f"Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.2f}%")

        # Checkpoint callback: saves on improvement, tracks stagnation.
        checkpoint(val_acc, model, epoch)

        # Early-stop check.
        if checkpoint.early_stop:
            print(f"Early stopping at epoch {epoch}")
            break

    # -----------------------------------
    # 6. Plot training curves
    # -----------------------------------
    plt.figure(figsize=(12, 4))
    plt.subplot(1, 2, 1)
    plt.plot(train_losses, label="Train Loss")
    plt.plot(val_losses, label="Val Loss")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.plot(train_accs, label="Train Acc")
    plt.plot(val_accs, label="Val Acc")
    plt.xlabel("Epoch")
    plt.ylabel("Accuracy (%)")
    plt.legend()

    # Save BEFORE show(): show() can clear the current figure, which made
    # the original savefig() call write an empty image.
    plt.savefig("loss_acc.png")
    plt.show()

    # -----------------------------------
    # 7. Model saving is handled by the ModelCheckpoint callback above,
    #    which already wrote the best weights to disk during training.
    # -----------------------------------