"""
本文件是主要的训练文件
"""

from torch import nn
from tqdm import tqdm
import torch
import copy
import datetime
import os

from data_process import train_data_loader
from my_model import MyRnn
from config import *


# 1. Prepare the data (project-defined helper returning train/validation loaders).
train_loader, valid_loader = train_data_loader()

# 2. Training setup
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("train device: ", device)
net = MyRnn(100).to(device)  # 100 is presumably a size hyper-parameter — TODO confirm against MyRnn's signature
criterion = nn.MSELoss(reduction="mean")  # batch-mean squared error (regression objective)
optimizer = torch.optim.Adam(net.parameters(), lr=LEARNING_RATE)
# best_lost: lowest validation loss seen so far ("lost" looks like a typo for "loss");
# best_net: deep copy of the model at that best epoch;
# easy_stop_count: epochs since the last improvement ("easy_stop" is presumably "early_stop").
# These names are shared with the training loop and save step below, so they are kept as-is.
best_lost, best_net, easy_stop_count = float("inf"), None, 0


# 3. Training loop
for epoch in range(NUM_EPOCHS):
    # ---- Training phase ----
    net.train()  # enable training-mode behaviour (dropout/batch-norm, if any)
    loss_record = []
    train_pbar = tqdm(train_loader, position=0, leave=True)  # progress bar for this epoch
    train_pbar.set_description(f"Epoch {epoch + 1}/{NUM_EPOCHS}")

    for x, y in train_pbar:
        optimizer.zero_grad()  # clear gradients from the previous step
        x, y = x.to(device), y.to(device)
        y = y.reshape(-1, 1)  # targets as a column vector — assumes net outputs shape (batch, 1); TODO confirm
        y_hat = net(x)
        loss = criterion(y_hat, y)  # batch-mean MSE
        loss.backward()  # back-propagate
        optimizer.step()  # update parameters
        loss_record.append(loss.item())

        train_pbar.set_postfix({"loss": loss.item()})

    mean_train_loss = sum(loss_record) / len(loss_record)  # assumes train_loader yields at least one batch

    # ---- Validation phase ----
    net.eval()  # switch to inference-mode behaviour
    loss_record = []  # reuse the list for validation losses
    valid_pbar = tqdm(valid_loader, position=0, leave=True)
    valid_pbar.set_description(f"Epoch {epoch + 1}/{NUM_EPOCHS}")

    for x, y in valid_pbar:
        x, y = x.to(device), y.to(device)
        y = y.reshape(-1, 1)
        with torch.no_grad():  # no gradient tracking needed for evaluation
            y_hat = net(x)
            loss = criterion(y_hat, y)
            loss_record.append(loss.item())

            valid_pbar.set_postfix({"loss": loss.item()})

    # Epoch-level mean validation loss
    mean_valid_loss = sum(loss_record) / len(loss_record)
    print(f"Epoch {epoch + 1}/{NUM_EPOCHS}, train loss: {mean_train_loss}, valid loss: {mean_valid_loss}")

    # Keep a deep copy of the model whenever validation loss improves.
    # NOTE(review): "best_lost"/"easy_stop" look like typos for "best_loss"/"early_stop";
    # they are module-level names shared with the setup and save steps, so left unchanged here.
    if mean_valid_loss < best_lost:
        best_lost, best_net = mean_valid_loss, copy.deepcopy(net)
        easy_stop_count = 0
    else:
        easy_stop_count += 1

    # Early stopping: abort after EASY_STOP consecutive epochs without improvement.
    if easy_stop_count >= EASY_STOP:
        print(f"Early stop at epoch {epoch + 1}/{NUM_EPOCHS}")
        break

    # Learning-rate decay: divide the LR by 10 after 30 stagnant epochs, until it reaches LOW_BOUND,
    # then reset the stagnation counter so early stop starts counting afresh.
    # NOTE(review): this branch fires only if EASY_STOP > 30 — otherwise the early-stop break
    # above always triggers first. Confirm the config values make this reachable.
    if optimizer.param_groups[0]['lr'] > LOW_BOUND and easy_stop_count >= 30:
        optimizer.param_groups[0]['lr'] /= 10.0
        print(f"Adjust the learning rate {epoch + 1}/{NUM_EPOCHS} Current learning rate: {optimizer.param_groups[0]['lr']}")
        easy_stop_count = 0


# 4. Save the best model's weights (timestamped filename so repeated runs don't overwrite).
if best_net is None:
    # best_net is only None if validation loss never went below float("inf") — e.g. every
    # epoch produced NaN losses. Fail loudly instead of crashing with an opaque
    # AttributeError inside torch.save.
    raise RuntimeError("No best model was recorded: validation loss was never finite.")
os.makedirs(MODEL_SAVE_PATH, exist_ok=True)  # torch.save does not create missing directories
torch.save(
    best_net.state_dict(),
    os.path.join(MODEL_SAVE_PATH, f"xxxxx-{datetime.datetime.now().strftime('%m-%d-%H-%M')}.pth")
)
