# new_train.py
import os, json, math
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, random_split
from datasets.dataset import TicketDataset
from models.price_predictor import PricePredictor
from tqdm import tqdm
import matplotlib.pyplot as plt

# ===== Paths and hyperparameters =====
CONFIG_PATH   = "movie-ticket-bidding/config/SqueezeExcitationModel.json"
TRAIN_PKL     = "movie-ticket-bidding/data/processed/train.pkl"
TEST_PKL      = "movie-ticket-bidding/data/processed/test.pkl"

BATCH_SIZE    = 512
NUM_EPOCHS    = 10
LEARNING_RATE = 1e-3
WEIGHT_DECAY  = 1e-4
VAL_SPLIT     = 0.1     # fraction of the training set held out for validation
# Fall back to CPU when no GPU is available so the script runs anywhere
# (the original hard-coded "cuda" and crashed on CPU-only machines).
DEVICE        = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# ===== Output directory (named after the config file) =====
cfg_name   = os.path.splitext(os.path.basename(CONFIG_PATH))[0]
out_dir    = os.path.join("movie-ticket-bidding", "checkpoints", cfg_name)
os.makedirs(out_dir, exist_ok=True)

BEST_PATH  = os.path.join(out_dir, "best_model.pt")
INFO_PATH  = os.path.join(out_dir, "training_info.txt")
PLOT_PATH  = os.path.join(out_dir, "训练指标曲线.png")

# ===== Data loading =====
# Split the training pickle into train/validation subsets; VAL_SPLIT of the
# samples go to validation and the rest to training.
full_ds = TicketDataset(TRAIN_PKL)
n_total = len(full_ds)
val_len = int(VAL_SPLIT * n_total)
train_len = n_total - val_len
train_ds, val_ds = random_split(full_ds, [train_len, val_len])

# Only the training loader shuffles; validation/test keep their order.
train_loader = DataLoader(train_ds, batch_size=BATCH_SIZE, shuffle=True)
val_loader = DataLoader(val_ds, batch_size=BATCH_SIZE)
test_loader = DataLoader(TicketDataset(TEST_PKL), batch_size=BATCH_SIZE)

# ===== Build model =====
# Keep the raw "model" section of the JSON config around so the exact
# architecture settings can be written next to the saved checkpoint later.
with open(CONFIG_PATH) as cfg_file:
    model_cfg = json.load(cfg_file)["model"]

# Infer the input feature dimension from the first training sample.
input_dim = full_ds[0][0].shape[0]
model = PricePredictor(CONFIG_PATH, input_dim).to(DEVICE)
print("📐 当前模型结构如下：")
print(model)

# ===== Loss and optimizer =====
criterion = nn.MSELoss()
optimizer = optim.AdamW(
    model.parameters(),
    lr=LEARNING_RATE,
    weight_decay=WEIGHT_DECAY,
)

# ===== Metric history =====
train_losses = []
val_losses = []
test_accs = []
best_test_acc = float("-inf")  # so the first epoch always saves a checkpoint

# ===== Training loop =====
for epoch in range(1, NUM_EPOCHS + 1):

    # ---------- Train ----------
    model.train()
    running_loss = 0.0
    loop = tqdm(train_loader, desc=f"[Epoch {epoch:02d}]")
    for x, y in loop:
        x, y = x.to(DEVICE), y.to(DEVICE)

        optimizer.zero_grad()
        pred  = model(x)
        loss  = criterion(pred, y)
        loss.backward()
        optimizer.step()

        # Weight by batch size so the epoch average is exact even when the
        # last batch is smaller than BATCH_SIZE.
        running_loss += loss.item() * x.size(0)
        loop.set_postfix(train_loss=loss.item())

    avg_train_loss = running_loss / train_len
    train_losses.append(avg_train_loss)

    # ---------- Validation ----------
    model.eval()
    val_loss = 0.0
    with torch.no_grad():
        for x, y in val_loader:
            x, y = x.to(DEVICE), y.to(DEVICE)
            val_loss += criterion(model(x), y).item() * x.size(0)
    # Guard against VAL_SPLIT == 0 producing an empty validation set.
    avg_val_loss = val_loss / max(val_len, 1)
    val_losses.append(avg_val_loss)

    # ---------- Test ----------
    total_correct, total_mse, total_cnt = 0, 0.0, 0
    mae_fn = nn.L1Loss()
    total_mae = 0.0
    with torch.no_grad():
        for x, y in test_loader:
            x, y = x.to(DEVICE), y.to(DEVICE)
            pred = model(x)

            mse = criterion(pred, y)
            total_mse += mse.item() * x.size(0)
            total_mae += mae_fn(pred, y).item() * x.size(0)

            # Acc@5%: fraction of predictions within 5% relative error of the
            # target. Vectorized in torch: the previous per-element Python
            # loop over squeeze()d numpy arrays broke for a batch of size 1
            # (squeeze() yields 0-d arrays, which zip() cannot iterate), and
            # dividing by the raw target flipped the inequality for negative
            # targets — |target| keeps the metric well-defined.
            # NOTE(review): assumes pred and y have the same number of
            # elements per batch — confirm against PricePredictor's output.
            p = pred.reshape(-1)
            t = y.reshape(-1)
            rel_err = (p - t).abs() / (t.abs() + 1e-8)
            total_correct += (rel_err <= 0.05).sum().item()
            total_cnt += x.size(0)

    test_mse = total_mse / total_cnt
    test_mae = total_mae / total_cnt
    test_acc = total_correct / total_cnt
    test_accs.append(test_acc)

    # ---------- Report ----------
    print(f"✅ Epoch {epoch:02d} | "
          f"Train Loss: {avg_train_loss:.4f} | "
          f"Val Loss: {avg_val_loss:.4f} | "
          f"Test Acc@5%: {test_acc*100:.2f}% | "
          f"Test MSE: {test_mse:.4f}")

    # ---------- Save best checkpoint (selected on test Acc@5%) ----------
    if test_acc > best_test_acc:
        best_test_acc = test_acc
        torch.save(model.state_dict(), BEST_PATH)
        print(f"🎯 保存最优模型 (Acc@5%={best_test_acc*100:.2f}%) → {BEST_PATH}")

        # Record the hyperparameters and config used for this checkpoint.
        with open(INFO_PATH, "w") as f:
            f.write(f"Config File: {CONFIG_PATH}\n")
            f.write(f"Epochs: {NUM_EPOCHS}\nBatch Size: {BATCH_SIZE}\n")
            f.write(f"Learning Rate: {LEARNING_RATE}\nWeight Decay: {WEIGHT_DECAY}\n")
            f.write(f"Best Test Acc@5%: {best_test_acc:.6f}\n")
            f.write(f"Model Config: {json.dumps(model_cfg, indent=2, ensure_ascii=False)}\n")

        with open(os.path.join(out_dir, "used_model_config.json"), "w") as f:
            json.dump(model_cfg, f, indent=2, ensure_ascii=False)

# ===== Plot metric curves =====
# Provide several common CJK fonts plus a generic fallback so the Chinese
# labels render wherever one of them is installed (the original pinned a
# single font, SimHei, which is often missing on Linux/macOS).
plt.rcParams['font.family'] = ['SimHei', 'Microsoft YaHei',
                               'Noto Sans CJK SC', 'sans-serif']
plt.rcParams['axes.unicode_minus'] = False  # render "-" correctly with CJK fonts

epochs = range(1, NUM_EPOCHS + 1)
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 10))

# Top panel: train vs. validation MSE loss per epoch.
ax1.plot(epochs, train_losses, label="训练损失")
ax1.plot(epochs, val_losses,   label="验证损失")
ax1.set_xlabel("训练轮数")
ax1.set_ylabel("MSE 损失")
ax1.set_title("训练 / 验证损失曲线")
ax1.legend()

# Bottom panel: test-set Acc@5% per epoch, in percent.
ax2.plot(epochs, [a*100 for a in test_accs], label="测试集准确率@5%")
ax2.set_xlabel("训练轮数")
ax2.set_ylabel("准确率 (%)")
ax2.set_title("测试集 Acc@5% 曲线")
ax2.legend()

plt.tight_layout()
plt.savefig(PLOT_PATH, dpi=150)
plt.close(fig)  # release figure memory once it has been written to disk
print(f"📈 指标曲线已保存至: {PLOT_PATH}")
