import os

os.environ['OMP_NUM_THREADS'] = '1'
import time
import itertools
import argparse
import sys
import shutil
from distutils.dir_util import copy_tree
import datetime
import tqdm
import random
import numpy as np
import torch
from torch import optim
from torch.utils.data import DataLoader
from src.datasets import uvDataset
from src.models.dnr import MVDNR
from src.utils.logger import Logger
from src.utils.draw_curve import draw_curve
from src.utils.str2bool import str2bool
from src.trainer_mvdnr import DNRTrainer

def main(args):
    """Train or evaluate a multi-view deferred neural renderer (MVDNR).

    Selects dataset/loaders from ``args.dataset``, builds the model,
    optimizer and LR schedule, then either runs the training loop
    (checkpointing the best rasterization / raytrace results) or a one-off
    evaluation when ``args.eval`` is set.
    """
    # Detect an attached debugger (a debugger installs a trace function).
    # FIX: guard against gettrace being None before calling it — the original
    # unconditional gettrace() would raise TypeError when sys.gettrace is
    # unavailable (which is exactly what the getattr default anticipates).
    gettrace = getattr(sys, 'gettrace', None)
    if gettrace is not None and gettrace():
        print('Hmm, Big Debugger is watching me')
        is_debug = True
        torch.autograd.set_detect_anomaly(True)
    else:
        print('No sys.gettrace')
        is_debug = False

    # Seed every RNG we rely on (python / numpy / torch CPU+GPU).
    if args.seed is not None:
        random.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)
        torch.cuda.manual_seed_all(args.seed)

    # Deterministic cuDNN (reproducible, slower) vs. benchmark autotuning.
    if args.deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        torch.autograd.set_detect_anomaly(True)
    else:
        torch.backends.cudnn.benchmark = True

    # ---------------------------------------------------------------- dataset
    if 'modelnet' in args.dataset:
        if args.dataset == 'modelnet40_12':
            fpath = os.path.expanduser('/root/autodl-tmp/code/MVSelect/data/modelnet40_images_new_12x')
            num_cam = 12
        elif args.dataset == 'modelnet40_20':
            fpath = os.path.expanduser('/root/autodl-tmp/code/MVSelect/data/modelnet40v2png_ori4')
            num_cam = 20
        else:
            raise Exception

        args.task = 'mvcnn'
        result_type = ['prec']  # metric names reported by this task (currently informational)
        args.lr = 5e-5 if args.lr is None else args.lr
        args.select_lr = 1e-4 if args.select_lr is None else args.select_lr
        args.batch_size = 8 if args.batch_size is None else args.batch_size

        # NOTE(review): imgDataset is not imported in this file — this branch
        # raises NameError as written; confirm the intended import before use.
        train_set = imgDataset(fpath, num_cam, split='train', )
        val_set = imgDataset(fpath, num_cam, split='train', per_cls_instances=25)
        test_set = imgDataset(fpath, num_cam, split='test', )
    elif 'uv' in args.dataset:
        # dataset name -> data root (replaces the original elif ladder)
        uv_roots = {
            'uv_custom_chair': './data/ventura-chair-by-poliform',
            'uv_hotdog': './data/hotdog',
            'uv_lego': './data/lego',
            'uv_drums': './data/drums',
            'uv_mic': './data/mic',
            'uv_ficus': './data/ficus',
            'uv_materials': './data/materials',
            'uv_ship': './data/ship',
            'uv_chair': './data/chair',
        }
        if args.dataset not in uv_roots:
            raise Exception
        fpath = os.path.expanduser(uv_roots[args.dataset])
        num_cam = args.num_cam
        args.task = 'mvdnr'
        result_type = ['psnr', 'ssim', 'lpips']
        args.lr = 1e-2 if args.lr is None else args.lr
        args.select_lr = 1e-4 if args.select_lr is None else args.select_lr
        args.batch_size = 1  # DNR training is single-scene, single-sample
        train_set = uvDataset(fpath, num_cam, split='train',
                              is_raytraced=args.is_raytraced,
                              is_stack=args.steps > 0,
                              train_ratio=args.train_ratio,
                              resize=args.img_resize,
                              sel_ind=args.sel_ind,
                              )

        val_set_raytrace = uvDataset(fpath, 1, split='val', is_raytraced=True, resize=args.img_resize)
        val_set_raster = uvDataset(fpath, 1, split='val', is_raytraced=False, resize=args.img_resize)
        test_set_raytrace = uvDataset(fpath, 1, split='test', is_raytraced=True, resize=args.img_resize)
        test_set_raster = uvDataset(fpath, 1, split='test', is_raytraced=False, resize=args.img_resize)
    else:
        # NOTE(review): Wildtrack / MultiviewX / frameDataset are not imported
        # in this file — this branch raises NameError as written.
        if args.dataset == 'wildtrack':
            base = Wildtrack(os.path.expanduser('~/Data/Wildtrack'))
        elif args.dataset == 'multiviewx':
            base = MultiviewX(os.path.expanduser('~/Data/MultiviewX'))
        else:
            raise Exception('must choose from [wildtrack, multiviewx]')

        args.task = 'mvdet'
        result_type = ['moda', 'modp', 'prec', 'recall']
        args.lr = 5e-4 if args.lr is None else args.lr
        args.select_lr = 1e-4 if args.select_lr is None else args.select_lr
        args.batch_size = 1 if args.batch_size is None else args.batch_size

        train_set = frameDataset(base, split='trainval', world_reduce=args.world_reduce,
                                 img_reduce=args.img_reduce, world_kernel_size=args.world_kernel_size,
                                 img_kernel_size=args.img_kernel_size,
                                 dropout=args.dropcam, augmentation=args.augmentation)
        val_set = frameDataset(base, split='val', world_reduce=args.world_reduce,
                               img_reduce=args.img_reduce, world_kernel_size=args.world_kernel_size,
                               img_kernel_size=args.img_kernel_size)
        test_set = frameDataset(base, split='test', world_reduce=args.world_reduce,
                                img_reduce=args.img_reduce, world_kernel_size=args.world_kernel_size,
                                img_kernel_size=args.img_kernel_size)

    if args.steps:
        # View-selection training uses a lower task-network LR.
        args.lr /= 5

    def seed_worker(worker_id):
        # Derive each DataLoader worker's numpy/python seed from torch's seed
        # so worker-side augmentation stays reproducible.
        worker_seed = torch.initial_seed() % 2 ** 32
        np.random.seed(worker_seed)
        random.seed(worker_seed)

    # NOTE(review): the *_raytrace / *_raster sets below are created only in
    # the 'uv' branch above; the modelnet/wildtrack branches would hit
    # NameError here — confirm this script is only run with uv_* datasets.
    train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers,
                              pin_memory=True, worker_init_fn=seed_worker)
    val_raytrace_loader = DataLoader(val_set_raytrace, batch_size=1, shuffle=False, num_workers=args.num_workers,
                                     pin_memory=True, worker_init_fn=seed_worker)
    val_raster_loader = DataLoader(val_set_raster, batch_size=1, shuffle=False, num_workers=args.num_workers,
                                   pin_memory=True, worker_init_fn=seed_worker)
    test_raytrace_loader = DataLoader(test_set_raytrace, batch_size=1, shuffle=False, num_workers=args.num_workers,
                                      pin_memory=True, worker_init_fn=seed_worker)
    test_raster_loader = DataLoader(test_set_raster, batch_size=1, shuffle=False, num_workers=args.num_workers,
                                    pin_memory=True, worker_init_fn=seed_worker)

    # ---------------------------------------------------------------- logging
    # Build an experiment-identifying log directory name from the settings.
    select_settings = f'steps{args.steps}_'
    selind_settings = f'selind{len(args.sel_ind) if len(args.sel_ind) else ""}_'
    # Precedence note: the conditional applies to the whole concatenation, so
    # lr_settings is '' whenever steps == 0.
    lr_settings = f'base{args.base_lr_ratio}other{args.other_lr_ratio}' + \
                  f'select{args.select_lr}' if args.steps else ''
    logdir = f'logs/{args.dataset}/{"DEBUG_" if is_debug else ""}texdim_{args.texture_dim}_texfeature_{args.texture_features}_camnum_{args.num_cam}_{args.aggregation}_down{args.down}_imgresize{args.img_resize}_' \
             f'{"raytraced_" if args.is_raytraced else ""}' \
             f'{f"ps_{args.patch_size}_psel_{args.patch_sel_num}_" if args.patch_size else ""}' \
             f'{select_settings if args.steps else ""}' \
             f'{selind_settings if args.sel_ind else ""}' \
             f'lr{args.lr}{lr_settings}_b{args.batch_size}_e{args.epochs}_dropcam{args.dropcam}_trainratio_{args.train_ratio}_' \
             f'{datetime.datetime.today():%Y-%m-%d_%H-%M-%S}' if not args.eval \
        else f'logs/{args.dataset}/EVAL_{args.eval_mode}_{args.resume.replace(".pth","")}'
    os.makedirs(logdir, exist_ok=True)
    os.makedirs(os.path.join(logdir, "visual"), exist_ok=True)
    # Snapshot the source tree into the log dir for reproducibility.
    # FIX: shutil.copytree(dirs_exist_ok=True) replaces distutils.dir_util's
    # copy_tree (distutils was removed in Python 3.12).
    shutil.copytree('src', logdir + '/scripts/src', dirs_exist_ok=True)
    for script in os.listdir('.'):
        if script.endswith('.py'):
            dst_file = os.path.join(logdir, 'scripts', os.path.basename(script))
            shutil.copyfile(script, dst_file)
    sys.stdout = Logger(os.path.join(logdir, 'log.txt'), )  # tee stdout to log.txt
    print(logdir)
    print('Settings:')
    print(vars(args))

    # ------------------------------------------------------------------ model
    if args.task == 'mvcnn':
        # NOTE(review): MVCNN is not imported in this file — confirm import.
        model = MVCNN(train_set, args.arch, args.aggregation).cuda()
    elif args.task == 'mvdnr':
        model = MVDNR(train_set, aggregation=args.aggregation,
                      tex_dim=args.texture_dim, texture_features=args.texture_features).cuda()
    else:
        # NOTE(review): MVDet is not imported in this file — confirm import.
        model = MVDet(train_set, args.arch, args.aggregation,
                      args.use_bottleneck, args.hidden_dim, args.outfeat_dim).cuda()

    # Optionally resume: load only the checkpoint keys the current model has.
    if args.resume:
        if ".pth" not in args.resume:
            print(f'loading checkpoint: logs/{args.dataset}/{args.resume}/model.pth')
            pretrained_dict = torch.load(f'logs/{args.dataset}/{args.resume}/model.pth')
        else:
            print(f'loading checkpoint: logs/{args.dataset}/{args.resume}')
            pretrained_dict = torch.load(f'logs/{args.dataset}/{args.resume}')
        model_dict = model.state_dict()
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)

    # Per-parameter-group learning rates: texture at full LR, U-Net at 0.1x,
    # the MVSelect head at its own select_lr.
    if args.task == 'mvdnr':
        param_dicts = [{"params": [p for n, p in model.named_parameters() if 'texture' in n and p.requires_grad],
                        "lr": args.lr, },
                       {"params": [p for n, p in model.named_parameters()
                                   if ('unet' in n) and ('select' not in n) and ('texture' not in n) and p.requires_grad],
                        "lr": args.lr * 0.1, },
                       {"params": [p for n, p in model.named_parameters() if 'select' in n and p.requires_grad],
                        "lr": args.select_lr, }, ]
    else:
        param_dicts = [{"params": [p for n, p in model.named_parameters()
                                   if 'base' not in n and 'select' not in n and p.requires_grad],
                        "lr": args.lr * args.other_lr_ratio, },
                       {"params": [p for n, p in model.named_parameters() if 'base' in n and p.requires_grad],
                        "lr": args.lr * args.base_lr_ratio, },
                       {"params": [p for n, p in model.named_parameters() if 'select' in n and p.requires_grad],
                        "lr": args.select_lr, }, ]
    optimizer = optim.Adam(param_dicts, lr=args.lr, betas=(0.5, 0.999))

    def lambda_rule(epoch):
        # LR multiplier: constant 1.0 for the first 2000 epochs, then linear
        # decay to 0 at args.epochs.
        # FIX: the original divided by (epochs - 2000) unguarded, which raises
        # ZeroDivisionError at epochs == 2000; for shorter runs the decay
        # phase never starts, so the multiplier is simply 1.0.
        decay_epochs = args.epochs - 2000
        if decay_epochs <= 0:
            return 1.0
        return 1.0 - max(0, epoch - 2000) / float(decay_epochs)
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)

    if args.task == 'mvcnn':
        # NOTE(review): ClassifierTrainer is not imported in this file.
        trainer = ClassifierTrainer(model, logdir, args)
    elif args.task == 'mvdnr':
        trainer = DNRTrainer(model, logdir, args)
    else:
        # NOTE(review): PerspectiveTrainer is not imported in this file.
        trainer = PerspectiveTrainer(model, logdir, args)

    # Learning-curve accumulators.
    x_epoch = []
    train_loss_s = []
    train_prec_s = []
    val_loss_s = []
    val_prec_s = []
    test_loss_s = []
    test_prec_s = []

    # ------------------------------------------------------------------ learn
    if not args.eval:
        best_prec = {
            "rasterization": {"epoch": 0, "psnr": 0.0, "ssim": 0.0, "lpips": np.inf},
            "raytrace": {"epoch": 0, "psnr": 0.0, "ssim": 0.0, "lpips": np.inf},
        }

        def save_if_best(kind, loss, prec, epoch):
            # Checkpoint only when psnr and ssim both improve AND lpips drops
            # simultaneously (deliberately strict Pareto condition).
            best = best_prec[kind]
            if prec[0] >= best["psnr"] and prec[1] >= best["ssim"] and prec[2] <= best["lpips"]:
                best["epoch"] = epoch
                best["psnr"] = prec[0]
                best["ssim"] = prec[1]
                best["lpips"] = prec[2]
                print("Update %s best loss/psnr/ssim/lpips: %f/%f/%f/%f"
                      % (kind, loss, prec[0], prec[1], prec[2]))
                torch.save(model.state_dict(),
                           os.path.join(logdir, 'model_%s_best_ep_%d_psnr_%.2f_ssim_%.4f_lpips_%.4f.pth'
                                        % (kind, epoch, prec[0], prec[1], prec[2])))

        for epoch in tqdm.tqdm(range(1, args.epochs + 1)):
            print('Training...')
            train_loss, train_prec = trainer.train(epoch, train_loader, optimizer, scheduler)
            # Evaluate and checkpoint ~10 times over the run (>= once/epoch).
            if epoch % max(args.epochs // 10, 1) == 0:
                torch.save(model.state_dict(), os.path.join(logdir, 'model.pth'))
                print('Testing...')
                if args.steps > 0:
                    test_loss, test_prec = trainer.test(val_raster_loader, "rasterization")
                    save_if_best("rasterization", test_loss, test_prec, epoch)
                    test_loss_s.append(test_loss)
                    test_prec_s.append(test_prec[0])

                    # Report per-camera selection frequencies from MVSelect.
                    action_dict = trainer.test(train_loader, "rasterization", is_eval=True)
                    print(' '.join('cam {} {:.3f} |'.format(cam, freq) for cam, freq in action_dict))
                    print('sel cam ind: ')
                    print(' '.join('{}'.format(cam) for cam, freq in action_dict[:args.steps]))
                else:
                    # NOTE(review): raytrace *test* results are tracked under
                    # the "rasterization" key, mirroring the original code —
                    # confirm the key naming is intended.
                    test_loss, test_prec = trainer.test(test_raytrace_loader, "raytrace_test")
                    save_if_best("rasterization", test_loss, test_prec, epoch)
                    test_loss_s.append(test_loss)
                    test_prec_s.append(test_prec[0])

                    val_loss, val_prec = trainer.test(val_raytrace_loader, "raytrace_val")
                    save_if_best("raytrace", val_loss, val_prec, epoch)
                    val_loss_s.append(val_loss)
                    val_prec_s.append(val_prec[0])

                # Draw & save learning curves (both branches drew the test
                # curve identically; only steps == 0 also has a val curve).
                x_epoch.append(epoch)
                train_loss_s.append(train_loss)
                train_prec_s.append(train_prec)
                draw_curve(os.path.join(logdir, 'learning_curve_test.jpg'), x_epoch, train_loss_s, test_loss_s,
                           train_prec_s, test_prec_s)
                if args.steps <= 0:
                    draw_curve(os.path.join(logdir, 'learning_curve_val.jpg'), x_epoch, train_loss_s, val_loss_s,
                               train_prec_s, val_prec_s)
    else:
        # Evaluation-only mode.
        if args.eval_mode == "sel":
            action_dict = trainer.test(train_loader, "rasterization", is_eval=True)
            print(' '.join('cam {} {:.3f} |'.format(cam, freq) for cam, freq in action_dict))
            print('sel cam ind: ')
            print(' '.join('{}'.format(cam) for cam, freq in action_dict[:args.steps]))
        elif args.eval_mode == "val":
            val_loss, val_prec = trainer.test(val_raytrace_loader, "raytrace")
            print("Val Raytrace loss/psnr/ssim/lpips: %f/%f/%f/%f"%(val_loss, val_prec[0], val_prec[1], val_prec[2]))
        elif args.eval_mode == "test":
            test_loss, test_prec = trainer.test(test_raytrace_loader, "raytrace")
            print("Test Raytrace loss/psnr/ssim/lpips: %f/%f/%f/%f"%(test_loss, test_prec[0], test_prec[1], test_prec[2]))


if __name__ == '__main__':
    # common settings
    parser = argparse.ArgumentParser(description='view selection for multiview classification & detection')
    parser.add_argument('--eval', action='store_true', help='evaluation only')
    parser.add_argument('--arch', type=str, default='resnet18')
    parser.add_argument('--aggregation', type=str, default='max', choices=['mean', 'max', 'random', 'first'])
    parser.add_argument('-d', '--dataset', type=str, default='uv_hotdog',
                        choices=['wildtrack', 'multiviewx', 'modelnet40_12', 'modelnet40_20', 'uv_chair', 'uv_hotdog',\
                                 'uv_custom_chair','uv_lego','uv_drums','uv_mic','uv_ficus','uv_materials','uv_ship'])
    parser.add_argument('-j', '--num_workers', type=int, default=4)
    # batch_size / lr / select_lr default to None: main() fills in a
    # per-dataset default when the flag is not given.
    parser.add_argument('-b', '--batch_size', type=int, default=None, help='input batch size for training')
    parser.add_argument('--dropcam', type=float, default=0.0)
    parser.add_argument('--epochs', type=int, default=10, help='number of epochs to train')
    parser.add_argument('--lr', type=float, default=None, help='learning rate for task network')
    parser.add_argument('--select_lr', type=float, default=None, help='learning rate for MVselect')
    parser.add_argument('--base_lr_ratio', type=float, default=1.0)
    parser.add_argument('--other_lr_ratio', type=float, default=1.0)
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    # resume: either a run directory name under logs/<dataset>/ or a .pth path
    # relative to logs/<dataset>/ (see checkpoint loading in main()).
    parser.add_argument('--resume', type=str, default=None)
    parser.add_argument('--visualize', action='store_true')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--deterministic', type=str2bool, default=False)
    # MVSelect settings
    parser.add_argument('--steps', type=int, default=0,
                        help='number of camera views to choose. if 0, then no selection')
    parser.add_argument('--gamma', type=float, default=0.99, help='reward discount factor (default: 0.99)')
    parser.add_argument('--down', type=int, default=1, help='down sample the image to 1/N size')
    # parser.add_argument('--beta_entropy', type=float, default=0.01)
    # multiview detection specific settings
    parser.add_argument('--eval_init_cam', type=str2bool, default=False,
                        help='only consider pedestrians covered by the initial camera')
    parser.add_argument('--reID', action='store_true')
    parser.add_argument('--augmentation', type=str2bool, default=True)
    parser.add_argument('--id_ratio', type=float, default=0)
    parser.add_argument('--cls_thres', type=float, default=0.6)
    parser.add_argument('--alpha', type=float, default=1.0, help='ratio for per view loss')
    parser.add_argument('--use_mse', type=str2bool, default=False)
    parser.add_argument('--use_bottleneck', type=str2bool, default=True)
    parser.add_argument('--hidden_dim', type=int, default=128)
    parser.add_argument('--outfeat_dim', type=int, default=0)
    parser.add_argument('--world_reduce', type=int, default=4)
    parser.add_argument('--world_kernel_size', type=int, default=10)
    parser.add_argument('--img_reduce', type=int, default=12)
    parser.add_argument('--img_kernel_size', type=int, default=10)

    ## for DNR
    parser.add_argument('--is_raytraced', action='store_true', help='use raytraced')
    parser.add_argument('--num_cam', type=int, default=100)
    parser.add_argument('--texture_dim', type=int, default=2048)
    parser.add_argument('--texture_features', type=int, default=16)
    parser.add_argument('--img_resize', type=int, default=800)

    # loss weights for the DNR trainer
    parser.add_argument('--lambda_L1', type=float, default=10.0, help='weighting of the L1 loss')
    parser.add_argument('--lambda_L1_Diff', type=float, default=10.0, help='weighting of the L1 loss of the image space gradients')
    parser.add_argument('--lambda_Reg_Tex', type=float, default=0.1, help='weighting of the regularizer of the texture')
    parser.add_argument('--lambda_VGG', type=float, default=10.0, help='weighting of the L1 loss')
    parser.add_argument('--lambda_L1_Texture', type=float, default=30.0, help='weighting of the L1 loss')
    parser.add_argument('--lambda_SSIM', type=float, default=30.0, help='weighting of the L1 loss')
    parser.add_argument('--lambda_fft', type=float, default=0.2, help='weighting of the L1 loss')

    parser.add_argument('--train_ratio', type=float, default=1.0, help='train ratio')
    # sel_ind: explicit list of camera indices to train on (empty = use all)
    parser.add_argument('--sel_ind', nargs='+', type=int, help='select ind', default=[])
    parser.add_argument('--patch_size', type=int, default=None)
    parser.add_argument('--patch_sel_num', type=int, default=5)
    parser.add_argument('--eval_mode', type=str, default='sel',
                        choices=['sel', 'test', 'val'])
    args = parser.parse_args()

    main(args)
