from collections import OrderedDict
from tqdm import tqdm
import argparse
import torch
from dataset.cad_dataset import get_dataloader
from config import ConfigLSTM  # LSTM-specific configuration
from utils import cycle
from trainer import TrainerLSTM

# Disable cuDNN globally — presumably to work around a cuDNN incompatibility
# or nondeterminism with this model (e.g. LSTM backward on some setups);
# TODO(review): confirm the reason and scope this if possible.
torch.backends.cudnn.enabled = False

def main():
    """Train the LSTM model.

    Builds the config, trainer, and dataloaders, then runs the epoch/batch
    training loop with periodic single-batch validation, full validation-set
    evaluation every 5 epochs, and checkpointing.
    """
    cfg = ConfigLSTM('train')  # LSTM-specific configuration

    tr_agent = TrainerLSTM(cfg)

    # Resume from a checkpoint when continuing a previous run.
    if cfg.cont:
        tr_agent.load_ckpt(cfg.ckpt)

    train_loader = get_dataloader('train', cfg)
    # Two validation loaders: `val_loader` is wrapped in cycle() for cheap
    # per-step spot checks; `val_loader_all` stays finite for full evaluation.
    val_loader = get_dataloader('validation', cfg)
    val_loader_all = get_dataloader('validation', cfg)
    val_loader = cycle(val_loader)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    clock = tr_agent.clock

    for e in range(clock.epoch, cfg.nr_epochs):
        pbar = tqdm(train_loader)
        for b, data in enumerate(pbar):
            # Move tensor entries to the target device; note this drops
            # any non-tensor entries from the batch dict entirely.
            data = {k: v.to(device) for k, v in data.items() if isinstance(v, torch.Tensor)}
            _, losses = tr_agent.train_func(data)  # outputs are unused here

            pbar.set_description("EPOCH[{}][{}]".format(e, b))
            pbar.set_postfix(OrderedDict({k: v.item() for k, v in losses.items()}))

            # Periodic single-batch validation on the cycled loader.
            if clock.step % cfg.val_frequency == 0:
                val_data = next(val_loader)
                val_data = {k: v.to(device) for k, v in val_data.items() if isinstance(v, torch.Tensor)}
                tr_agent.val_func(val_data)

            clock.tick()
            tr_agent.update_learning_rate()

        # Full validation-set evaluation every 5 epochs.
        # NOTE(review): this checks clock.epoch before clock.tock() advances
        # it — confirm whether the intent is "every 5th completed epoch".
        if clock.epoch % 5 == 0:
            tr_agent.evaluate(val_loader_all)

        clock.tock()

        if clock.epoch % cfg.save_frequency == 0:
            tr_agent.save_ckpt()

        # Always keep a rolling "latest" checkpoint for easy resumption.
        tr_agent.save_ckpt('latest')


# Script entry point: only run training when executed directly.
if __name__ == '__main__':
    main()

