# -*- coding: utf-8 -*-
# @Author  : xuelun

# common import
import os
import wandb
import argparse
import torch
import torch.optim as optim

# project-local imports
from tools import common as com
from datasets.lidarcap_dataset import lidarcap_seq_data_generator as lidarcap_seq
# import dataloader
from datasets.dataloader import threaded_loader
# crafter for training
from tools import crafter
# servers for wandb
from tools.common import servers
# whole network

from tqdm import tqdm

from modules.regressor import Regressor, NaiveRegressor, FusionRegressor, VotingRegressor, AttentionRegressor
from modules.loss import Loss, GCNLoss


def get_model_and_loss(args):
    """Instantiate the pose regressor selected by ``args.regress``.

    Args:
        args: parsed CLI namespace; ``args.regress`` picks the architecture
            and ``args.train_step`` (first/second/whole) is forwarded to the
            staged regressors.

    Returns:
        (model, loss): the network and its matching ``Loss`` criterion.

    Raises:
        ValueError: if ``args.regress`` names an unknown architecture.
    """
    # Lazy builders so only the selected network class is instantiated.
    builders = {
        'naive': lambda: NaiveRegressor(),
        'hierarchy': lambda: Regressor(args.train_step),
        'fusion': lambda: FusionRegressor(args.train_step),
        'vote': lambda: VotingRegressor(args.train_step),
        'attention': lambda: AttentionRegressor(args.train_step),
    }
    try:
        build = builders[args.regress]
    except KeyError:
        # Previously an unrecognised mode fell through every elif and crashed
        # later with UnboundLocalError on `model`; fail fast instead.
        raise ValueError(
            f"unknown --regress mode {args.regress!r}; "
            f"expected one of {sorted(builders)}") from None
    return build(), Loss()


class MyTrainer(crafter.Trainer):
    """Trainer specialisation wiring the regressor and loss into crafter's loop."""

    def forward_backward(self, inputs):
        """One training step: forward pass, loss, backward. Returns loss details."""
        output = self.net(inputs)
        loss, details = self.loss_func(**output)
        loss.backward()
        return details

    def forward_val(self, inputs):
        """Forward pass plus loss, no backward. Returns loss details."""
        output = self.net(inputs)
        loss, details = self.loss_func(**output)
        return details

    def forward_net(self, inputs):
        """Raw forward pass (used for visualization); returns the network output.

        NOTE(review): the original re-loaded the checkpoint from the global
        ``args.ckpt_path`` on every call and discarded it (its
        ``load_state_dict`` line was commented out). Weights are already
        restored once in ``__main__``, so that redundant per-call disk read
        is removed here.
        """
        return self.net(inputs)

if __name__ == '__main__':
    '''
    script format when run train.py
        debug train: python train.py --debug --gpu x
        training: python train.py --gpu x
        evaluation: python train.py --debug --gpu x --eval
        visualization: python train.py --debug --gpu x --eval --visual --eval_bs 1 --cpkg_path
    '''
    # Hyperparameters from argparse and config-defaults.yaml
    parser = argparse.ArgumentParser()

    parser.add_argument('--regress', type=str)

    # bs
    parser.add_argument('--bs', type=int, default=8,
                        help='input batch size for training (default: 24)')
    parser.add_argument('--eval_bs', type=int, default=16,
                        help='input batch size for evaluation')
    # threads
    parser.add_argument('--threads', type=int, default=4,
                        help='Number of threads (default: 4)')
    # gpu
    parser.add_argument('--gpu', type=int, default=[0],
                        help='-1 for CPU', nargs='+')
    # lr
    parser.add_argument('--lr', type=float, default=0.0001,
                        help='Learning rate (default: 0.0001)')
    # epochs
    parser.add_argument('--epochs', type=int, default=100,
                        help='Traning epochs (default: 100)')
    # dataset
    parser.add_argument("--datasets", type=str, required=True)
    # server
    parser.add_argument("--server", type=str, default='local',
                        help=f'W&B server: {set(servers.keys())}',
                        choices=set(servers.keys()))
    # debug
    parser.add_argument('--debug', action='store_true',
                        help='For debug mode')
    # eval or visual
    parser.add_argument('--eval', default=False, action='store_true',
                        help=f'evaluation the trained model')

    parser.add_argument('--visual', default=False, action='store_true',
                        help=f'visualization the result ply')

    # extra things, ignored
    parser.add_argument('--use_image', default=False, action='store_true',
                        help=f'use image whether or not')
    parser.add_argument('--is_aug', default=False, action='store_true',
                        help=f'argument training dataset whether or not')
    parser.add_argument('--ckpt_path', type=str, default=None,
                        help=f'the saved ckpt needed to be evaluated or visualized')
    parser.add_argument('--regress_points', action='store_true')

    parser.add_argument('--git_id', type=str, default=None)
    parser.add_argument('--train_step', type=str, default='first',
                        help=f'choose from [first, second, whole]')
    args = parser.parse_args()

    # Debug runs log offline; real runs authenticate against the chosen
    # W&B server first.
    if args.debug:
        os.environ['WANDB_MODE'] = 'dryrun'
    if not args.debug:
        com.login(args.server)

    # wandb.init(project='human_recons', entity='ljl36')
    wandb.init(project='human_recons', entity='zhangjingyi1')
    # Merge CLI args into wandb.config; config-defaults.yaml presumably
    # supplies the remaining keys, e.g. `seed` used below — TODO confirm.
    wandb.config.update(args, allow_val_change=True)
    config = wandb.config

    # Check device, make dirs, make the run reproducible
    #   save model for optimizer, like visualization image
    #   save model for best one in a training procedure
    iscuda = com.torch_set_gpu(config.gpu)
    # if not args.debug: com.reserve_mem(0.85)
    com.make_reproducible(iscuda, config.seed)
    # Run identifier: the wandb run-dir component containing the run id,
    # unless --git_id overrides it.
    if args.git_id is None:
        wandbid = [x for x in wandb.run.dir.split(
            '/') if wandb.run.id in x][-1]
    else:
        wandbid = args.git_id

    # output/<id>/optimizer: visualization artifacts produced while optimizing
    optim_dir = os.path.join('output', wandbid, 'optimizer')
    com.mkdir(optim_dir)
    # output/<id>/model: checkpoints saved during training
    model_dir = os.path.join('output', wandbid, 'model')
    com.mkdir(model_dir)

    dataset = args.datasets

    # Load training and validation data.  need_depth is enabled only for the
    # 'fusion' regressor — presumably it consumes projected depth; confirm in
    # the dataset code.
    if args.eval:
        # Evaluation: a single loader over the validation split; is_visual
        # toggles the extra outputs needed for ply export.
        velid_loader = threaded_loader(lidarcap_seq(config, dataset=dataset, is_train=False, is_use_image=args.use_image, is_aug=args.is_aug,
                                       is_visual=args.visual, need_depth=(args.regress == 'fusion')), threads=config.threads, batch_size=config.eval_bs, shuffle=False, drop_last=False)
        loader = dict(Velid=velid_loader)

    else:
        # Training: shuffled train loader plus an unshuffled validation loader.
        train_loader = threaded_loader(lidarcap_seq(config, dataset=dataset, is_train=True, is_use_image=args.use_image, is_aug=args.is_aug, need_depth=(args.regress == 'fusion')),
                                       threads=config.threads, batch_size=config.bs, shuffle=True, drop_last=False)
        valid_loader = threaded_loader(lidarcap_seq(config, dataset=dataset, is_train=False, is_use_image=args.use_image,
                                                    is_aug=args.is_aug, need_depth=(args.regress == 'fusion')), threads=config.threads, batch_size=config.bs, shuffle=False, drop_last=False)

        loader = dict(Train=train_loader, Valid=valid_loader)

    # Load and fix network config (frozen: allow_val_change=False)
    netConfig = com.load_configs('network.yaml')
    wandb.config.update(netConfig, allow_val_change=False)

    # Instantiate network + loss; optionally warm-start from a checkpoint,
    # keeping only keys that exist in the freshly built model (partial load).
    net, loss = get_model_and_loss(args)
    if args.ckpt_path is not None:
        save_model = torch.load(args.ckpt_path)['state_dict']
        model_dict = net.state_dict()
        state_dict = {k: v for k, v in save_model.items()
                      if k in model_dict.keys()}
        print(state_dict.keys())
        model_dict.update(state_dict)
        net.load_state_dict(model_dict)

    # Define optimizer over trainable parameters only
    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad],
                           lr=config.lr, weight_decay=1e-4)
    # threshold = best * ( 1 - threshold ) in 'min' in 'rel'
    # NOTE(review): sc defaults to {} but the keys below are read without
    # defaults — a network.yaml missing 'scheduler' raises KeyError here.
    sc = netConfig.get('scheduler', {})
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, 'min', factor=sc['factor'], patience=sc['patience'],
        threshold_mode='rel', threshold=sc['threshold'], min_lr=sc['min_lr'],
        verbose=True)

    # Instance trainer.  In eval mode the run id is recovered from the
    # checkpoint path (assumes an output/<id>/model/<ckpt> layout, and that
    # --ckpt_path was given; otherwise this is an AttributeError — confirm).
    train = MyTrainer(net, loader, loss, optimizer, wandbid=args.ckpt_path.split(
        '/')[-3] if args.eval else None)

    if iscuda:
        train = train.cuda()

    if args.eval:
        if args.visual:
            # Visualization: run inference once, then dump per-frame meshes
            # as .ply files in parallel (32 workers).
            from tools import multiprocess
            from utils.save_visualization_file import save_ply
            vertices, filenames = train(
                epoch=1, istrain=False, velid=True, visual=True)

            # for vertex, filename in tqdm(zip(vertices, filenames)):
            #     save_ply(vertex, filename)

            multiprocess.multi_func(save_ply, 32, len(
                vertices), 'saving', False, vertices, filenames)

        else:
            # Evaluation loop: single validation pass, print headline loss
            loss_dict = train(epoch=1, istrain=False, velid=True)
            loss_log = {'e' + k: v for k, v in loss_dict.items()}
            print('Shape Error (NonParam): ', loss_dict['loss'])
    else:
        # Training loop: track best train/valid loss (the *acc trackers are
        # only used by the commented-out accuracy checkpoints below).
        mintloss, best_tacc = float('inf'), 0.
        minvloss, best_vacc = float('inf'), 0.
        for epoch in range(1, config.epochs + 1):
            print('')

            # One full pass over Train, then one over Valid.
            train_loss_dict = train(epoch)

            val_loss_dict = train(epoch, istrain=False)
            # Prefix keys so both splits go into a single wandb.log call.
            train_loss_log = {'train' + k: v for k,
                              v in train_loss_dict.items()}
            val_loss_log = {'val' + k: v for k, v in val_loss_dict.items()}
            epoch_logs = train_loss_log
            epoch_logs.update(val_loss_log)
            wandb.log(epoch_logs)

            # Checkpoint whenever train or valid loss improves.
            if train_loss_dict['loss'] <= mintloss:
                mintloss = train_loss_dict['loss']
                best_save = os.path.join(model_dir, 'best-train-loss.pth')
                torch.save(
                    {'net': 'NETNAME()', 'state_dict': net.state_dict()}, best_save)
                com.hint(f"Saving best train loss model at epoch {epoch}")
            # if tacc >= best_tacc:
            #     best_tacc = tacc
            #     best_save = os.path.join(model_dir, 'best-train-acc.pth')
            #     torch.save({'net': 'NETNAME()', 'state_dict': net.state_dict()}, best_save)
            #     com.hint(f"Saving best train acc  model at epoch {epoch}")
            if val_loss_dict['loss'] <= minvloss:
                minvloss = val_loss_dict['loss']
                best_save = os.path.join(model_dir, 'best-valid-loss.pth')
                torch.save(
                    {'net': 'NETNAME()', 'state_dict': net.state_dict()}, best_save)
                com.hint(f"Saving best valid loss model at epoch {epoch}")
            # if vacc >= best_vacc:
            #     best_vacc = vacc
            #     best_save = os.path.join(model_dir, 'best-valid-acc.pth')
            #     torch.save({'net': 'NETNAME()', 'state_dict': net.state_dict()}, best_save)
            #     com.hint(f"Saving best valid acc  model at epoch {epoch}")

            # Keep only the best-so-far scalars in the run summary.
            com.clean_summary(wandb.run.summary)
            wandb.run.summary["best_train_loss"] = mintloss
            # wandb.run.summary["best_train_acc"] = best_tacc
            wandb.run.summary["best_valid_loss"] = minvloss
            # wandb.run.summary["best_valid_acc"] = best_vacc

            # NOTE(review): the plateau scheduler steps on *training* loss;
            # stepping on validation loss is the usual choice — confirm intent.
            scheduler.step(train_loss_dict['loss'])
