# -*- coding: utf-8 -*-

"""
Created on 03/23/2022
main.
@author: Kang Xiatao (kangxiatao@gmail.com)
"""

from configs import *
from utils.network_utils import get_network
from utils.data_utils import get_dataloader
from train_test import *

import os
from datetime import datetime
import argparse
import torch.multiprocessing as mp
import torch.distributed as dist
from apex.parallel import DistributedDataParallel as DDP
from apex import amp


def main():
    """Entry point: build the model and launch one training process per GPU.

    Reads all hyper-parameters from the project config, constructs the
    network on the host, then uses ``torch.multiprocessing.spawn`` to fork
    ``config.gpus`` workers that each run ``train_once``.
    """
    config = init_config()
    logger, writer = init_logger(config)

    # No checkpoint to resume from; workers start training from scratch.
    state = None

    # ===== build/load model =====
    model = get_network(config.network, config.depth, config.dataset, use_bn=config.get('use_bn', True))
    # NOTE(review): the model is left on CPU here — presumably each spawned
    # worker moves it to its own device; confirm inside train_once.

    # ===== train (one process per local GPU) =====
    config.world_size = config.gpus * config.nodes
    # Respect an externally supplied rendezvous address/port (e.g. set by a
    # job scheduler) and only fall back to the historical hard-coded defaults
    # when the variables are absent.
    os.environ.setdefault('MASTER_ADDR', '192.168.1.40')
    os.environ.setdefault('MASTER_PORT', '9999')
    mp.spawn(train_once, nprocs=config.gpus, args=(
        model, config, writer, logger, state, config.lr_mode, config.optim_mode,))


# Script entry point: run the launcher only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
