import os
import sys

import numpy as np
import torch
from tensorboardX import SummaryWriter

from datasets import get_loader
from model import get_model
from train import supervise
from utils import mk_path, _get_logger, loadyaml

if __name__ == '__main__':
    # Entry point: load the experiment config, set up output dirs / logging /
    # seeding, build the data loaders and model, then run supervised training.
    #
    # The config path may be overridden on the command line:
    #   python main.py [path/to/config.yaml]
    # and falls back to the original hard-coded default for backward compatibility.
    default_config = "/home/ubuntu/code/pytorch_code/seg/code/config/deeplabv3plus_resnet50_720x960_80k_camvid11.yaml"
    config_path = sys.argv[1] if len(sys.argv) > 1 else default_config
    args = loadyaml(config_path)  # load YAML experiment settings

    # Honor the `cuda` flag, but fall back to CPU when no GPU is available.
    if args.cuda and torch.cuda.is_available():
        args.device = torch.device("cuda")
    else:
        args.device = torch.device("cpu")

    # All artifacts (log file, tensorboard events, checkpoints) live under
    # save_path, resolved relative to this source file's directory.
    root = os.path.dirname(os.path.realpath(__file__))
    args.save_path = os.path.join(root, args.save_path)
    mk_path(args.save_path)
    mk_path(os.path.join(args.save_path, "tensorboardX"))
    mk_path(os.path.join(args.save_path, "model"))
    args.finetune_save_path = os.path.join(args.save_path, "model", "finetune_model.pth")
    args.pretrain_save_path = os.path.join(args.save_path, "model", "pretrain_model.pth")
    args.supervise_save_path = os.path.join(args.save_path, "model", "supervise_model.pth")

    args.writer = SummaryWriter(os.path.join(args.save_path, "tensorboardX"))
    args.logger = _get_logger(os.path.join(args.save_path, "log.log"), "info")
    args.tqdm = os.path.join(args.save_path, "tqdm.log")

    # Seed CPU RNG, every CUDA device, and numpy for repeatability.
    # (manual_seed_all replaces manual_seed, which only seeds the current GPU.)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

    # NOTE(review): benchmark=True lets cuDNN autotune kernels for speed at the
    # cost of bit-exact reproducibility; seeds make runs repeatable, not identical.
    torch.backends.cudnn.deterministic = False  # single-GPU run; strict determinism not required
    torch.backends.cudnn.benchmark = True  # let cuDNN pick the fastest kernels

    train_loader, test_loader = get_loader(args)  # build train/test data loaders
    # Convert the iteration budget into an epoch count (rounded up by the +1).
    args.epochs = args.total_itrs // len(train_loader) + 1
    args.logger.info("==========> train_loader length:{}".format(len(train_loader) * args.batch_size))
    args.logger.info("==========> test_dataloader length:{}".format(len(test_loader) * args.batch_size))
    args.logger.info("==========> epochs length:{}".format(args.epochs))

    model = get_model(args=args).to(device=args.device)
    supervise(model=model, train_loader=train_loader, test_loader=test_loader, args=args)