import os
import torch
import argparse
from torch.backends import cudnn
from models.MIMOUNet import build_net
from train import _train
from dist import init_distributed_mode,is_distributed,is_primary
from apex.parallel import DistributedDataParallel, convert_syncbn_model
from apex import amp
def main(args):
    """Entry point: set up distributed mode, output directories, the model,
    and dispatch to training.

    Args:
        args: parsed CLI namespace. Must carry ``model_name``, ``mode``,
            ``model_save_dir``, ``result_dir`` and ``logs_save_dir`` (the
            last three are derived in the ``__main__`` block) plus whatever
            distributed-launch fields ``init_distributed_mode`` reads.
    """
    # Initialize torch.distributed state (rank / world size) from the args/env.
    init_distributed_mode(args)

    # Let cuDNN autotune convolution kernels (beneficial for fixed input sizes).
    cudnn.benchmark = True

    # os.makedirs creates all intermediate directories, and exist_ok=True makes
    # it safe when several ranks race to create the same path — so the original
    # per-level mkdir cascade ('results/', 'results/<model>', ...) was redundant.
    os.makedirs(args.model_save_dir, exist_ok=True)
    os.makedirs(args.result_dir, exist_ok=True)
    os.makedirs(args.logs_save_dir, exist_ok=True)

    model = build_net(args.model_name)
    if is_primary():
        # Only the primary rank logs, to avoid duplicated output per process.
        total_params = sum(p.numel() for p in model.parameters())
        print(f'{total_params:,} total parameters.')
        total_trainable_params = sum(
            p.numel() for p in model.parameters() if p.requires_grad)
        print(f'{total_trainable_params:,} training parameters.')

    model.cuda()
    if is_distributed():
        # apex DDP wrapper; the torch-native DDP / sync-BN alternatives below
        # are kept for reference.
        # model = torch.nn.parallel.DistributedDataParallel(model,device_ids=[args.gpu])
        # model = convert_syncbn_model(model)
        model = DistributedDataParallel(model)
    if args.mode == 'train':
        _train(model, args)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    # Directories
    parser.add_argument('--model_name', default='MIMO-UNet', choices=[
        'MIMO-UNet', 
        'MIMO-UNetPlus',
        'MIMO-UNet-SNN',
        'MIMO-UNet-SNN-MSRESNET',
        'MIMO-UNet-SNN-FIRE',
        'MIMO-UNet-SNN-SHUFFLE',
        'MIMO-UNet-SNN-SHUFFLE-SCNN',
        'MIMO-UNet-SNN-FIRE-SCNN',
        'MIMO-UNet-SNN-FIRE-ADATH',
        ], type=str)
    parser.add_argument('--data_dir', type=str, default='./CamVid')
    parser.add_argument('--exp_name', type=str, default='./exp')
    parser.add_argument('--mode', default='test', choices=['train', 'test'], type=str)

    # Train
    parser.add_argument('--batch_size', type=int, default=4)
    parser.add_argument('--learning_rate', type=float, default=1e-4)
    parser.add_argument('--weight_decay', type=float, default=0)
    parser.add_argument('--num_epoch', type=int, default=3000)
    parser.add_argument('--print_freq', type=int, default=10)
    parser.add_argument('--num_worker', type=int, default=8)
    parser.add_argument('--save_freq', type=int, default=5)
    parser.add_argument('--valid_freq', type=int, default=1)
    parser.add_argument('--resume', type=str, default='')
    parser.add_argument('--gamma', type=float, default=0.5)
    parser.add_argument('--lr_steps', type=list, default=[(x+1) * 500 for x in range(3000//500)])
    parser.add_argument('--num_classes', type=int, default=12, help="how many classes training for")

    parser.add_argument("--local_rank", type=int,default=0)
    parser.add_argument('--device', default='cuda', help='device id (i.e. 0 or 0,1 or cpu)')
    # 开启的进程数(注意不是线程),不用设置该参数，会根据nproc_per_node自动设置
    parser.add_argument('--world-size', default=4, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')

    # Test
    parser.add_argument('--test_model', type=str, default='weights/MIMO-UNet.pkl')
    parser.add_argument('--save_image', type=bool, default=False, choices=[True, False])

    args = parser.parse_args()
    args.logs_save_dir = os.path.join('results/', args.model_name, args.exp_name, 'logs')
    args.text_logs_dir = os.path.join('results/', args.model_name, args.exp_name, 'logs','logs.txt')
    args.model_save_dir = os.path.join('results/', args.model_name, args.exp_name, 'weights/')
    args.result_dir = os.path.join('results/', args.model_name, args.exp_name, 'result_image/')
    main(args)
