import torch

import runner2
import dataset
import model2
import assistant
import tokenizer

if __name__ == "__main__":
    # Single source of truth for the cleaned Douban corpus; previously this
    # literal was duplicated for vocab building and dataloader construction,
    # which risked the two paths silently diverging.
    data_path = '../my_spider_douban_formal/output/clean_data.jsonl'

    # Character-level tokenizer with a vocabulary built from the corpus.
    # Renamed from `charset_normalizer`: that name described nothing here and
    # collided with the well-known PyPI package of the same name.
    char_tokenizer = tokenizer.ChineseCharacterTokenizer()
    char_tokenizer.build_vocab(data_path)

    # Classifier sized to the tokenizer's vocabulary (len() presumably returns
    # the vocab size — confirm against tokenizer.ChineseCharacterTokenizer).
    model = model2.DoubanClassifierModel(len(char_tokenizer))
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
    loss_fun = torch.nn.CrossEntropyLoss()
    # Cosine annealing with a 10-step period; NOTE(review): assumes the Runner
    # steps the scheduler once per epoch — verify in runner2.Runner.
    lr_schedule = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)

    # Batch size 256; dev split drives validation, test split is unused here.
    train_iter, dev_iter, test_iter = dataset.get_dataloader(data_path, 256)

    runner_man = runner2.Runner(model, optimizer, loss_fun, train_iter,
                                logger=assistant.logger, multi=False,
                                device=torch.device('cuda:0'), scheduler=lr_schedule,
                                valid_loader=dev_iter)
    # Resume training from a previously saved checkpoint before running.
    runner_man.load_checkpoint("./output/model2/model_params_400_2023-10-09_23-35-05.pth")
    runner_man.run(1000)
