import argparse
import datetime
import logging
import os.path
import time
from pathlib import Path

import torchvision.models

from model.mobilenet_v1 import mobilenet_1

os.environ["CUDA_VISIBLE_DEVICES"] = "0"

from torch import nn
from loss.swpd_loss import SWPDLoss
import numpy as np
import torch
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from torchvision.transforms import transforms

from dataLoader import ToTensor, Normalize, LP300WDataSet
from loss.rpk_loss import RKPLoss
from loss.wpdc_loss import WPDCLoss
from model.mobilenetv3 import MobileNetV3_Small, MobileNetV3_Large, MobileNetV3_Small_MultiOut, \
    MobileNetV3_Large_MultiOut, MobileNetV3_Small_MultiOut_v2

lr = None
epoch_vdc = 500


def parse_args():
    """Build and parse the command-line options for 2D-image -> 3DMM-parameter training.

    Returns an ``argparse.Namespace`` with data paths, optimizer
    hyper-parameters and runtime switches (device mode, snapshot/log dirs).
    """
    p = argparse.ArgumentParser(description='2d img-> 3DMM param')
    p.add_argument('-j', '--workers', default=16, type=int)
    p.add_argument('--epochs', default=300, type=int)
    p.add_argument('--start-epoch', default=1, type=int)
    p.add_argument('-b', '--batch-size', default=256, type=int)
    p.add_argument('-vb', '--val-batch-size', default=32, type=int)
    p.add_argument('--base_lr', '--learning-rate', default=0.002, type=float)
    p.add_argument('--momentum', default=0.9, type=float, metavar='M',
                   help='momentum')
    p.add_argument('--weight-decay', '--wd', default=5e-4, type=float)
    p.add_argument('--print-freq', '-p', default=200, type=int)
    p.add_argument('--resume', default=None, type=str, metavar='PATH')
    p.add_argument('--mode', default='gpu', type=str)
    p.add_argument('--snapshot', default='sn/', type=str, metavar='PATH')
    p.add_argument('--log_fp', default='log/', type=str, metavar='PATH')
    p.add_argument('--num_classes', default=12, type=int)
    p.add_argument('--param_fp', default='dataset/', type=str, metavar='PATH')
    p.add_argument('--ori_fp', default='/home/chenxianhao/train_aug_120x120/', type=str, metavar='PATH')
    p.add_argument('--opt-style', default='resample', type=str)
    return p.parse_args()


class AverageMeter(object):
    """Tracks the most recent value and a running weighted average.

    The mean is maintained incrementally (Welford-style update), so no
    separate running sum needs to be stored alongside the count.
    """

    def __init__(self):
        # Initial state is identical to the post-reset state.
        self.reset()

    def reset(self):
        """Zero out all tracked statistics."""
        self.val = 0
        self.avg = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and fold it into the running mean."""
        self.val = val
        self.count += n
        # incremental mean: avg += (val - avg) * n / count
        delta = val - self.avg
        self.avg += (delta * n) / self.count


def validate(val_loader, model, criterion, epoch, log=True):
    """Run one evaluation pass of *model* over *val_loader*.

    ``criterion`` is a pair ``[loss_fns, weights]``; every loss function in
    ``criterion[0]`` is evaluated on each batch.  When *log* is true, the
    per-loss means and elapsed time are written to the log.  Relies on the
    module-level ``args`` for the device mode.

    Returns the mean of the first loss over the whole loader as a float.
    """
    model.eval()

    start = time.time()
    # One accumulator list per configured loss function.
    losses_list = [[] for _ in criterion[0]]
    with torch.no_grad():
        for inputs, targets in val_loader:
            inputs = inputs.float()
            targets = targets.float()
            inputs.requires_grad = False
            targets.requires_grad = False
            if args.mode == 'gpu':
                inputs = inputs.cuda(non_blocking=True)
                targets = targets.cuda(non_blocking=True)
            output = model(inputs)
            for k, loss_fn in enumerate(criterion[0]):
                # Move each scalar loss to CPU so np.mean can aggregate it.
                losses_list[k].append(loss_fn(output, targets).cpu())

        elapse = time.time() - start
        if log:
            s = f'Val: [{epoch}][{len(val_loader)}] Time {elapse:.3f}'
            for k in range(len(criterion[0])):
                s += f' loss_{k}: {np.mean(losses_list[k])}'
            logging.info(s)
    return np.mean(losses_list[0]).item()


def train(train_loader, model, criterion, optimizer, epoch):
    """Train *model* for a single epoch and return the mean of the first loss.

    ``criterion`` is ``[loss_fns, weights]``; the optimisation target is the
    weighted sum of all losses.  Relies on the module-level ``args`` (device
    mode) and ``lr`` (used for logging only).
    """
    batch_time = AverageMeter()
    # One meter per configured loss function.
    loss_meters = [AverageMeter() for _ in criterion[0]]
    model.train()

    tic = time.time()
    for inputs, targets in train_loader:
        targets.requires_grad = False
        inputs = inputs.float()
        targets = targets.float()
        if args.mode == 'gpu':
            inputs = inputs.cuda(non_blocking=True)
            targets = targets.cuda(non_blocking=True)
        output = model(inputs)

        raw_losses = [loss_fn(output, targets) for loss_fn in criterion[0]]

        # Weighted sum of all losses; track each term's running average.
        loss = raw_losses[0] * criterion[1][0]
        loss_meters[0].update(raw_losses[0].item(), inputs.size(0))
        for k in range(1, len(raw_losses)):
            loss += raw_losses[k] * criterion[1][k]
            loss_meters[k].update(raw_losses[k].item(), inputs.size(0))

        # Backprop and SGD step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Measure elapsed time per batch.
        batch_time.update(time.time() - tic)
        tic = time.time()

    s = f'Epoch: [{epoch}] LR: {lr:.8f} Time {batch_time.val:.3f} ' \
        f'({batch_time.avg:.3f})'
    for k in range(len(loss_meters)):
        s += f' loss_{k}: {loss_meters[k].avg}'

    logging.info(s)
    return loss_meters[0].avg


# NOTE: parsed at import time (outside the __main__ guard) so that the
# module-level `args` global read by train()/validate() always exists.
args = parse_args()

if __name__ == '__main__':
    # ---- output directories -------------------------------------------
    # makedirs with exist_ok is race-free and also creates missing parents
    # (os.mkdir would raise if the dir already existed).
    os.makedirs(args.log_fp, exist_ok=True)
    os.makedirs(args.snapshot, exist_ok=True)

    lr = args.base_lr

    current_time = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")

    # Log to a timestamped file and to the console simultaneously.
    logging.basicConfig(
        format='[%(asctime)s] [p%(process)s] [%(pathname)s:%(lineno)d] %(message)s',
        level=logging.INFO,
        handlers=[
            logging.FileHandler(args.log_fp + str(current_time) + '.log', mode='w'),
            logging.StreamHandler()
        ]
    )

    std_size = 120  # input image side length
    # load dataset
    logging.info(f'load dataset, data_size:{std_size}')

    transform = transforms.Compose([ToTensor(), Normalize()])
    train_dataset = LP300WDataSet(args.ori_fp, args.param_fp, True, transform, std_size)
    val_dataset = LP300WDataSet(args.ori_fp, args.param_fp, False, transform, std_size)
    logging.info(f'train--{len(train_dataset)}, val--{len(val_dataset)}')
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.workers,
                              shuffle=True, pin_memory=True, drop_last=True)
    val_loader = DataLoader(val_dataset, batch_size=args.val_batch_size, num_workers=args.workers,
                            shuffle=False, pin_memory=True)

    # ---- model --------------------------------------------------------
    logging.info('load model')
    # MobileNet-V1 regressing all 240 3DMM parameters in one shot.
    model = mobilenet_1(num_classes=240)

    # Optionally resume from a checkpoint holding a bare state_dict.
    if args.resume is not None:
        if Path(args.resume).is_file():
            logging.info(f'loading checkpoint {args.resume}')
            checkpoint = torch.load(args.resume, map_location=torch.device('cpu'))
            model.load_state_dict(checkpoint)
        else:
            logging.info(f'no checkpoint found at {args.resume}')

    if args.mode == 'gpu':
        model = model.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.base_lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # ---- losses -------------------------------------------------------
    # Training starts with the weighted-parameter-distance loss only;
    # validation is scored with the keypoint loss.
    wpdc_loss = SWPDLoss()
    rpk_loss = RKPLoss(opt_style='kpt', resample_num=0)
    # criterion layout everywhere: [[loss_fns...], [weights...]]
    criterion = [[wpdc_loss], [1.0]]
    criterion_val = [[rpk_loss], [1.0]]
    if args.mode == 'gpu':
        for k in range(len(criterion[0])):
            criterion[0][k] = criterion[0][k].cuda()
        for k in range(len(criterion_val[0])):
            criterion_val[0][k] = criterion_val[0][k].cuda()
    logging.info(f'weight-{criterion[1]}')

    step_size = 50       # LR decay interval (epochs)
    gamma = 0.5          # LR decay factor
    weight_update = 10   # refresh SWPD loss weights every N epochs
    epoch_vdc = 30       # epoch at which the RKP loss joins the objective
                         # (deliberately overrides the module-level default)
    # Pass the variables (not literals) so the scheduler always matches the
    # hyper-parameters reported in the log line below.
    scheduler = StepLR(optimizer, step_size=step_size, gamma=gamma)

    logging.info(f'batch_size:{args.batch_size}, model: mbv1 swpdl, rpkl'
                 f' learning rate: {args.base_lr} step_size: {step_size} gamma:{gamma}'
                 f' weight_up:{weight_update} rpk_st:{epoch_vdc}')

    logging.info(f'wpdcl p_w:{wpdc_loss.p_w}')

    logging.info('start train')

    last_loss = None
    for epoch in range(args.start_epoch, args.epochs + 1):
        # Periodically re-derive the SWPD loss weighting: a pass over the
        # training set with log=False just drives the loss statistics.
        if epoch % weight_update == 1:
            criterion[0][0].update_weight(True)
            validate(train_loader, model, criterion, epoch, False)
            criterion[0][0].update_weight(False)
            s = f'p_w: {criterion[0][0].get_weight()[0]:.3f} ' \
                f's_w: {criterion[0][0].get_weight()[1]:.3f} ' \
                f'e_w: {criterion[0][0].get_weight()[2]:.3f} '
            logging.info(s)

        last_loss = train(train_loader, model, criterion, optimizer, epoch)
        filename = f'{args.snapshot}epoch_{epoch}.pth.tar'

        logging.info(f'save epoch-{epoch}')
        torch.save(model.state_dict(), filename)

        validate(val_loader, model, criterion_val, epoch)

        # After the warm-up phase, add the keypoint loss to the objective.
        if epoch == epoch_vdc:
            criterion = [[wpdc_loss, rpk_loss], [1.0, 0.1]]
            # was a bare print(); keep all progress output in the log
            logging.info("start rpk")
        scheduler.step()
        lr = scheduler.get_last_lr()[0]

    logging.info("finish train")
