"""
train.py
训练主脚本：读取数据 -> 构建模型 -> 训练 -> 验证 -> 保存最好模型
运行：python train.py
"""
import torch, os, time
from torch.utils.data import DataLoader
from sklearn.metrics import classification_report  # 打印详细指标
from src.dataset import HerbDataset
from src.model import get_model
from src.config import Config
from tqdm import tqdm  # 进度条库

def _train_one_epoch(model, loader, criterion, optimizer, device, desc):
    """Run one training epoch; return the loss summed over all samples."""
    model.train()  # training mode (enables Dropout, BN statistics updates)
    running_loss = 0.0
    for imgs, labels in tqdm(loader, desc=desc):
        imgs, labels = imgs.to(device), labels.to(device)

        optimizer.zero_grad()        # clear accumulated gradients
        outputs = model(imgs)        # forward pass
        loss = criterion(outputs, labels)
        loss.backward()              # backward pass
        optimizer.step()             # update weights

        # loss.item() is the batch mean; multiply by batch size to sum per-sample loss
        running_loss += loss.item() * imgs.size(0)
    return running_loss


def _evaluate(model, loader, device):
    """Evaluate `model` on `loader`; return (accuracy, predictions, labels).

    accuracy is 0.0 for an empty loader; predictions/labels are flat Python
    lists of per-sample class indices, suitable for sklearn metrics.
    """
    model.eval()  # eval mode (disables Dropout, freezes BN statistics)
    correct = total = 0
    all_preds, all_labels = [], []

    with torch.no_grad():  # no gradient bookkeeping -> faster, less memory
        for imgs, labels in loader:
            imgs, labels = imgs.to(device), labels.to(device)
            outputs = model(imgs)
            _, preds = torch.max(outputs, 1)  # index of highest logit per sample

            total   += labels.size(0)
            correct += (preds == labels).sum().item()

            all_preds.extend(preds.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

    # Guard against an empty test set (the original raised ZeroDivisionError).
    acc = correct / total if total else 0.0
    return acc, all_preds, all_labels


def main():
    """Training entry point: load data -> build model -> train -> validate -> save best checkpoint.

    Side effects: creates the `models/` directory, writes `models/best.pth`
    (state_dict of the best-accuracy model), prints progress and a final
    per-class classification report for the BEST epoch.
    """
    # Record start time so total duration can be reported at the end.
    start_time = time.time()
    print('✅训练开始， 开始时间', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))

    # ---------- 1. Datasets ----------
    train_ds = HerbDataset(Config.train_dir, Config.input_size, 'train')
    test_ds  = HerbDataset(Config.test_dir,  Config.input_size, 'test')

    # DataLoader: batches samples automatically, loads with worker subprocesses
    train_loader = DataLoader(train_ds,
                              batch_size=Config.batch_size,
                              shuffle=True,      # shuffle during training
                              num_workers=4)     # 4 worker subprocesses
    test_loader  = DataLoader(test_ds,
                              batch_size=Config.batch_size,
                              shuffle=False,
                              num_workers=4)

    # ---------- 2. Model ----------
    model = get_model().to(Config.device)

    # ---------- 3. Loss & optimizer ----------
    criterion = torch.nn.CrossEntropyLoss()  # multi-class cross-entropy
    optimizer = torch.optim.Adam(model.parameters(), lr=Config.lr)

    # ---------- 4. Train / validate loop ----------
    best_acc = 0.0
    # Keep the best epoch's predictions so the final report matches the saved
    # checkpoint (the original printed the LAST epoch's report instead).
    best_preds, best_labels = [], []
    os.makedirs('models', exist_ok=True)  # directory for saved checkpoints

    for epoch in range(1, Config.num_epochs + 1):
        running_loss = _train_one_epoch(
            model, train_loader, criterion, optimizer, Config.device,
            desc=f'Epoch {epoch}/{Config.num_epochs}')
        train_loss = running_loss / len(train_ds)  # mean per-sample loss

        val_acc, preds, labels = _evaluate(model, test_loader, Config.device)
        print(f'Epoch {epoch:2d}: train_loss={train_loss:.4f}  val_acc={val_acc:.4f}')

        # Checkpoint whenever validation accuracy improves.
        if val_acc > best_acc:
            best_acc = val_acc
            best_preds, best_labels = preds, labels
            torch.save(model.state_dict(), 'models/best.pth')
            print(f'  --> 新最佳准确率，模型已保存')

    print('✅训练结束，最佳准确率：', best_acc, '耗时：', (time.time() - start_time)/60, 'min' )
    # Detailed metrics (precision, recall, f1-score) for the best epoch,
    # i.e. the metrics of the checkpoint actually saved to models/best.pth.
    if best_labels:
        print(classification_report(best_labels,
                                    best_preds,
                                    target_names=test_ds.classes))

# Standard script-entry guard: run training only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()