from codecs import getwriter
import sys
import os
import argparse
import time

import numpy as np

import torch
from torchvision import transforms
import torch.backends.cudnn as cudnn


from my_py_toolkit.log.logger import get_logger
from evaluate import evaluate
from model import SixDRepNet, SixDRepNet2
import datasets
from loss import GeodesicLoss

import torch.utils.model_zoo as model_zoo
import torchvision



# Allow multiple OpenMP runtimes to coexist in the process — works around the
# "libiomp5 already initialized" crash seen with MKL/conda setups.
# (Presumably needed for this environment; harmless elsewhere.)
os.environ['KMP_DUPLICATE_LIB_OK']='True'


def parse_args(argv=None):
    """Parse command-line arguments for 6DRepNet training.

    Args:
        argv: Optional list of argument strings. ``None`` (the default)
            falls back to ``sys.argv[1:]`` — standard argparse behavior —
            so existing callers are unaffected.

    Returns:
        argparse.Namespace holding the training configuration.
    """

    def str2bool(value):
        # Bug fix: the original '--show_viz' flag had no `type`, so any
        # value supplied on the command line (even the string 'False')
        # arrived as a non-empty, truthy string. Interpret common textual
        # booleans explicitly instead.
        return str(value).lower() not in ('false', '0', 'no', '')

    parser = argparse.ArgumentParser(
        description='Head pose estimation using the 6DRepNet.')
    parser.add_argument(
        '--gpu', dest='gpu_id', help='GPU device id to use [0]',
        default='0', type=str)
    parser.add_argument(
        '--num_epochs', dest='num_epochs',
        help='Maximum number of training epochs.',
        default=30, type=int)
    parser.add_argument(
        '--batch_size', dest='batch_size', help='Batch size.',
        default=64, type=int)
    parser.add_argument(
        '--lr', dest='lr', help='Base learning rate.',
        default=0.00001, type=float)
    # Training split. (Alternative values seen in use: BIWI_70_30_train.npz)
    parser.add_argument(
        '--dataset', dest='dataset', help='Dataset type.',
        default='Pose_300W_LP', type=str)
    parser.add_argument(
        '--data_dir', dest='data_dir', help='Directory path for data.',
        default='/datasets/300W_LP', type=str)
    parser.add_argument(
        '--filename_list', dest='filename_list',
        help='Path to text file containing relative paths for every example.',
        default='datasets/300W_LP/files.txt', type=str)
    # Test split — same scheme as the training options above.
    parser.add_argument(
        '--dataset_test', dest='dataset_test', help='Dataset type.',
        default='Pose_300W_LP', type=str)
    parser.add_argument(
        '--data_dir_test', dest='data_dir_test', help='Directory path for data.',
        default='/datasets/300W_LP', type=str)
    parser.add_argument(
        '--filename_list_test', dest='filename_list_test',
        help='Path to text file containing relative paths for every example.',
        default='datasets/300W_LP/files.txt', type=str)
    parser.add_argument(
        '--output_string', dest='output_string',
        help='String appended to output snapshots.', default='', type=str)
    parser.add_argument(
        '--snapshot', dest='snapshot', help='Path of model snapshot.',
        default='', type=str)
    parser.add_argument(
        '--show_viz', default=True, type=str2bool
    )
    # Run evaluation + checkpointing every this many training iterations.
    parser.add_argument(
        '--steps_eval', default=500, type=int
    )

    parser.add_argument(
        '--log_dir', default='./log'
    )
    parser.add_argument(
        '--output_dir', default='./output'
    )

    parser.add_argument('--backbone_name', default='')
    parser.add_argument(
        '--backbone_file', default=''
    )

    args = parser.parse_args(argv)
    print(f'args: {args}')
    return args


def get_writer(logdir):
    """Create a TensorBoard ``SummaryWriter`` that logs under *logdir*.

    The import is deliberately local so the tensorboard dependency is only
    required when a writer is actually requested.
    """
    from torch.utils.tensorboard import SummaryWriter as _SummaryWriter
    return _SummaryWriter(logdir)

def get_ignored_params(model):
    """Yield every parameter of the model's stem (``model.layer0``).

    These go into the zero-learning-rate optimizer group. As a side effect,
    any submodule whose name contains ``'bn'`` is switched to eval mode.
    Note: ``named_parameters()`` recurses, so parameters of nested modules
    are yielded once per enclosing module — duplicates are intentional here
    (matches the original upstream behavior).
    """
    #b = [model.conv1, model.bn1, model.fc_finetune]
    for stage in (model.layer0,):
        for sub_name, sub_module in stage.named_modules():
            if 'bn' in sub_name:
                sub_module.eval()
            for _, weight in sub_module.named_parameters():
                yield weight


def get_non_ignored_params(model):
    """Yield every parameter of backbone stages ``layer1`` .. ``layer4``.

    These are trained at the base learning rate. Submodules whose name
    contains ``'bn'`` are put into eval mode as a side effect. Nested
    parameters are yielded once per enclosing module (recursive
    ``named_parameters``), matching the original upstream behavior.
    """
    stages = (model.layer1, model.layer2, model.layer3, model.layer4)
    for stage in stages:
        for sub_name, sub_module in stage.named_modules():
            if 'bn' in sub_name:
                sub_module.eval()
            for _, weight in sub_module.named_parameters():
                yield weight


def get_fc_params(model):
    """Yield every parameter of the regression head (``model.linear_reg``).

    These are trained at 10x the base learning rate (see the optimizer
    setup in ``__main__``). Nested parameters are yielded once per
    enclosing module, matching the original upstream behavior.
    """
    for head in (model.linear_reg,):
        for _, sub_module in head.named_modules():
            for _, weight in sub_module.named_parameters():
                yield weight


def load_filtered_state_dict(model, snapshot):
    """Load *snapshot* into *model*, ignoring keys the model does not have.

    Entries of ``snapshot`` whose keys are absent from the model's state
    dict are silently dropped; all other model weights are left untouched.
    """
    # By user apaszke from discuss.pytorch.org
    current = model.state_dict()
    for key, value in snapshot.items():
        if key in current:
            current[key] = value
    model.load_state_dict(current)


if __name__ == '__main__':

    args = parse_args()
    # TensorBoard writer and text logger both live under args.log_dir.
    writer = get_writer(args.log_dir)
    logger = get_logger(args.log_dir + '/run.log')
    cudnn.enabled = True
    num_epochs = args.num_epochs
    batch_size = args.batch_size
    # Restrict the visible devices, then address them by local index 0..N-1.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    gpu_ids = list(range(len(args.gpu_id.split(','))))
    # gpu_ids = list(range(4))

    # NOTE(review): snapshots are written under the hard-coded 'output/'
    # path, not args.output_dir — confirm which one is intended.
    if not os.path.exists('output/snapshots'):
        os.makedirs('output/snapshots')

    # Unique run directory name: model tag + unix timestamp + batch size.
    summary_name = '{}_{}_bs{}'.format(
        'SixDRepNet', int(time.time()), args.batch_size)

    if not os.path.exists('output/snapshots/{}'.format(summary_name)):
        os.makedirs('output/snapshots/{}'.format(summary_name))

    model = SixDRepNet(backbone_name=args.backbone_name,
                        backbone_file=args.backbone_file,
                        deploy=False,
                        pretrained=True)
    # Optionally resume weights; strip the 'module.' prefix that
    # DataParallel adds when a checkpoint was saved from a wrapped model.
    if not args.snapshot == '':
        saved_state_dict = torch.load(args.snapshot)
        model.load_state_dict({k.replace('module.', '') :v for k,v in saved_state_dict['model_state_dict'].items()})

    print('Loading data.')

    # ImageNet mean/std normalization; train-time augmentation is a resize
    # to 240 followed by a random 224 crop.
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225])
    transformations = transforms.Compose([transforms.Resize(240),
                                          transforms.RandomCrop(224),
                                          transforms.ToTensor(),
                                          normalize])
    pose_dataset = datasets.getDataset(
        args.dataset, args.data_dir, args.filename_list, transformations)
    train_loader = torch.utils.data.DataLoader(
        dataset=pose_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=4)
    # `pose_dataset` is rebound to the test split here; the train loader
    # already holds its own reference to the training dataset.
    # NOTE(review): the test loader reuses the train-time random-crop
    # transform — confirm that is intended for evaluation.
    pose_dataset = datasets.getDataset(
        args.dataset_test, args.data_dir_test, args.filename_list_test, transformations, train_mode = False)
    test_loader = torch.utils.data.DataLoader(
        dataset=pose_dataset,
        batch_size=args.batch_size,
        num_workers=2)

    model.cuda(gpu_ids[0])
    crit =  GeodesicLoss().cuda(gpu_ids[0]) #torch.nn.MSELoss().cuda(gpu)

    # Three LR groups: frozen stem (lr=0), backbone at the base lr, and the
    # regression head at 10x the base lr. The optimizer must be built
    # BEFORE the DataParallel wrap because the get_* helpers read
    # model.layerX attributes directly (DataParallel would hide them
    # behind .module).
    optimizer = torch.optim.Adam([
        {'params': get_ignored_params(model), 'lr': 0},
        {'params': get_non_ignored_params(model), 'lr': args.lr},
        {'params': get_fc_params(model), 'lr': args.lr * 10}
    ], lr=args.lr)
    model = torch.nn.DataParallel(model, gpu_ids)

    # Resume optimizer state from the same checkpoint as the weights.
    if not args.snapshot == '':
        optimizer.load_state_dict(saved_state_dict['optimizer_state_dict'])

    #milestones = np.arange(num_epochs)
    # Decay every group's lr by 10x at these epochs.
    milestones = [10, 15, 20, 25]
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=milestones, gamma=0.1)

    print('Starting training.')
    steps_per_epoch = len(train_loader)
    # NOTE(review): `iter` shadows the builtin for the rest of the script.
    iter = 0
    for epoch in range(num_epochs):
        # NOTE(review): loss_sum is accumulated but never read.
        loss_sum = .0
        for i, (images, gt_mat, _, _) in enumerate(train_loader):
            iter += 1
            # NOTE(review): images already comes out of the DataLoader as a
            # tensor; the torch.Tensor(...) re-wrap looks redundant.
            images = torch.Tensor(images).cuda(gpu_ids[0])

            # Forward pass
            pred_mat = model(images)

            # Calc loss
            loss = crit(gt_mat.cuda(gpu_ids[0]), pred_mat)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            loss_sum += loss.item()
            writer.add_scalar('loss', loss.item(), epoch * steps_per_epoch + i)
            logger.info(f'loss: {loss.item()}, steps: {epoch * steps_per_epoch + i}')

            # Console/file progress report every 100 steps within the epoch.
            if (i+1) % 100 == 0:
                print('Epoch [%d/%d], Iter [%d/%d] Loss: '
                      '%.6f' % (
                          epoch+1,
                          num_epochs,
                          i+1,
                          steps_per_epoch,
                          loss.item(),
                      )
                      )
                logger.info('Epoch [%d/%d], Iter [%d/%d] Loss: '
                      '%.6f' % (
                          epoch+1,
                          num_epochs,
                          i+1,
                          steps_per_epoch,
                          loss.item(),
                      )
                      )
            # Periodic evaluation + checkpoint, on a global-iteration cadence.
            if (iter + 1) % args.steps_eval == 0:
                evaluate(model, test_loader, iter, writer, args, gpu_ids)
                # TODO: temporary workaround — refactor later. (Translated
                # from the original Chinese comment.)
                # NOTE(review): these helpers are generators; calling them
                # without consuming the result executes NOTHING — if the
                # intent was to restore .eval() on the frozen BN layers
                # after evaluate(), the generators must be exhausted
                # (e.g. wrapped in list(...)). Confirm intent.
                get_ignored_params(model)
                get_non_ignored_params(model)
                get_fc_params(model)
                # NOTE(review): the checkpoint dict stores 'iter': iter but
                # the file name uses iter+1 — off by one between the two.
                print('Taking snapshot...',
                    torch.save({
                        'iter': iter,
                        'model_state_dict': model.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                    }, 'output/snapshots/' + summary_name + '/' + args.output_string +
                        '_iter_' + str(iter+1) + '.pth')
                    )
        scheduler.step()

        # Save models at numbered epochs.
        # if epoch % 1 == 0 and epoch < num_epochs:
