#!/usr/bin/env python3
# coding: utf-8

import torch
import torch.nn as nn

from loss.param import _numpy_to_cuda
from loss.param import *

"""
from 3DDFA https://github.com/cleardusk/3DDFA
"""

_to_tensor = _numpy_to_cuda  # gpu


def _parse_param_batch(param):
    """Work for both numpy and tensor"""
    N = param.shape[0]
    p_ = param[:, :12].view(N, 3, -1)
    p = p_[:, :, :3]
    offset = p_[:, :, -1].view(N, 3, 1)
    alpha_shp = param[:, 12:12 + dim_shp].view(N, -1, 1)
    alpha_exp = param[:, 12 + dim_shp:].view(N, -1, 1)
    return p, offset, alpha_shp, alpha_exp


class RKPLoss(nn.Module):
    """Vertex-reconstruction loss on normalized 3DMM parameters (from 3DDFA).

    Predicted and ground-truth parameter vectors are de-normalized with the
    dataset mean/std, decoded into 3D vertices through the morphable-model
    mean shape ``u`` and bases ``w_shp``/``w_exp``, and compared in vertex
    space. The variant is selected by ``opt_style``:

      * ``'all'``       - mean absolute error over every model vertex
      * ``'resample'``  - weighted squared error on the 68 keypoints
      * ``'emphasize'`` - ``weight``-mix of 'all' and 'resample'
      * ``'kpt'``       - weighted L1 (or squared) error on the 68 keypoints
    """

    def __init__(self, opt_style='all', weight=None, resample_num=0, dif_s='l1'):
        """
        Args:
            opt_style: one of 'all', 'resample', 'emphasize', 'kpt'.
            weight: two mixing factors for the 'emphasize' style;
                defaults to [0.3, 0.7].
            resample_num: number of extra randomly sampled vertices to mix
                into the keypoint set (0 disables resampling).
            dif_s: 'l1' for absolute difference in forward_kpt; any other
                value uses the squared difference.
        """
        super(RKPLoss, self).__init__()

        self.dif_s = dif_s
        if weight is None:
            weight = [0.3, 0.7]
        self.resample_num = resample_num
        self.weight = weight
        # Morphable-model data and parameter normalization statistics,
        # moved to GPU by _to_tensor. Cast everything to float32 so the
        # matmuls in the forward paths agree on dtype (self.u included,
        # for consistency with the five casts below).
        self.u = _to_tensor(u).float()
        self.param_mean = _to_tensor(mean_std.get('mean')).float()
        self.param_std = _to_tensor(mean_std.get('std')).float()
        self.w_shp = _to_tensor(w_shp).float()
        self.w_exp = _to_tensor(w_exp).float()

        # Rows of u / w_shp / w_exp restricted to the 68 facial keypoints.
        self.keypoints = _to_tensor(key_point)
        self.u_base = self.u[self.keypoints]
        self.w_shp_base = self.w_shp[self.keypoints]
        self.w_exp_base = self.w_exp[self.keypoints]

        # Bases are stored flattened: three rows (x, y, z) per vertex.
        self.w_shp_length = self.w_shp.shape[0] // 3
        self.scale = 1.0

        self.opt_style = opt_style
        # 1-based indices (into the 68-landmark scheme) of emphasized
        # landmarks: contour points, brow/eye corners, nose/mouth anchors.
        ind_68to21 = [[1, 7, 9, 11, 17], [18, 22, 23, 27, 37, 40, 43, 46], [28, 34, 32, 36, 49, 55, 52, 58]]

        # Per-landmark loss weights: 1.0 by default, 2.0 on emphasized points.
        weight_68 = [1.0] * 68
        emphasized_weight = 2.0
        for group in ind_68to21:
            for idx in group:
                weight_68[idx - 1] = emphasized_weight
        self.weight_68 = torch.tensor(weight_68)
        # Guard the .cuda() call the same way the resample path does, so
        # the device handling is consistent throughout the class.
        if torch.cuda.is_available():
            self.weight_68 = self.weight_68.cuda()

    def set_scale(self, scale):
        """Set the global multiplier applied in forward_all / forward_kpt."""
        self.scale = scale

    def _select_bases(self, resample_num):
        """Return (u, w_shp, w_exp) rows for the 68 keypoints, optionally
        augmented with ``resample_num`` randomly chosen extra vertices."""
        if resample_num != 0:
            index = torch.randperm(self.w_shp_length)[:resample_num].reshape(-1, 1)
            # Expand each vertex index to its three flattened rows (x, y, z).
            keypoints_resample = torch.cat((3 * index, 3 * index + 1, 3 * index + 2), dim=1).view(-1)
            if torch.cuda.is_available():
                keypoints_resample = keypoints_resample.cuda()
            keypoints_mix = torch.cat((self.keypoints, keypoints_resample))
        else:
            keypoints_mix = self.keypoints
        return self.u[keypoints_mix], self.w_shp[keypoints_mix], self.w_exp[keypoints_mix]

    def reconstruct_and_parse(self, input, target):
        """De-normalize prediction and target, then split them into parts.

        Handles the case where pose and face parameters are predicted
        separately: a 12-column input is treated as pose-only, a 228-column
        input as shape+expression-only; missing columns are filled from the
        ground truth before decoding.

        Returns:
            ((p, offset, alpha_shp, alpha_exp),
             (pg, offsetg, alpha_shpg, alpha_expg)) for prediction / target.
        """
        # Pose-only prediction: network emitted just the first 12 columns.
        if input.shape[1] == 12:
            t = target.clone()
            t[:, :12] = input
            input = t
        # Shape+expression-only prediction (199 shape + 29 expression).
        elif input.shape[1] == 199+29:
            t = target.clone()
            t[:, 12:] = input
            input = t
        # Undo the z-score normalization of the parameter vectors.
        param = input * self.param_std + self.param_mean
        param_gt = target * self.param_std + self.param_mean

        # parse param
        p, offset, alpha_shp, alpha_exp = _parse_param_batch(param)
        pg, offsetg, alpha_shpg, alpha_expg = _parse_param_batch(param_gt)

        return (p, offset, alpha_shp, alpha_exp), (pg, offsetg, alpha_shpg, alpha_expg)

    def forward_all(self, input, target):
        """Mean absolute vertex error over every model vertex, times scale."""
        (p, offset, alpha_shp, alpha_exp), (pg, offsetg, alpha_shpg, alpha_expg) \
            = self.reconstruct_and_parse(input, target)

        N = input.shape[0]
        # Do not penalize the z-translation: copy it from the ground truth.
        offset[:, -1] = offsetg[:, -1]
        gt_vertex = pg @ (self.u + self.w_shp @ alpha_shpg + self.w_exp @ alpha_expg) \
            .view(N, -1, 3).permute(0, 2, 1) + offsetg
        vertex = p @ (self.u + self.w_shp @ alpha_shp + self.w_exp @ alpha_exp) \
            .view(N, -1, 3).permute(0, 2, 1) + offset

        diff = torch.abs(gt_vertex - vertex)
        # NOTE(review): torch.mean already averages over all elements; the
        # extra /(68*3) looks like an inherited scale factor — confirm intent.
        loss = torch.mean(diff)/(68*3)
        return loss * self.scale

    def forward_resample(self, input, target):
        """Weighted squared keypoint error (optionally with resampled vertices)."""
        (p, offset, alpha_shp, alpha_exp), (pg, offsetg, alpha_shpg, alpha_expg) \
            = self.reconstruct_and_parse(input, target)
        u_base, w_shp_base, w_exp_base = self._select_bases(self.resample_num)

        # Do not penalize the z-translation: copy it from the ground truth.
        offset[:, -1] = offsetg[:, -1]

        N = input.shape[0]
        gt_vertex = pg @ (u_base + w_shp_base @ alpha_shpg + w_exp_base @ alpha_expg) \
            .view(N, -1, 3).permute(0, 2, 1) + offsetg
        vertex = p @ (u_base + w_shp_base @ alpha_shp + w_exp_base @ alpha_exp) \
            .view(N, -1, 3).permute(0, 2, 1) + offset

        # NOTE(review): weight_68 has 68 entries, so this broadcast assumes
        # resample_num == 0 (vertex is (N, 3, 68)); with extra resampled
        # vertices the multiply would fail — confirm callers.
        loss = torch.mean(((gt_vertex - vertex)**2)*self.weight_68)/(68*3)
        return loss

    def forward_kpt(self, input, target):
        """Weighted keypoint error, L1 or squared per ``dif_s``, times scale."""
        (p, offset, alpha_shp, alpha_exp), (pg, offsetg, alpha_shpg, alpha_expg) \
            = self.reconstruct_and_parse(input, target)
        u_base, w_shp_base, w_exp_base = self._select_bases(self.resample_num)

        # Do not penalize the z-translation: copy it from the ground truth.
        offset[:, -1] = offsetg[:, -1]

        N = input.shape[0]
        gt_vertex = pg @ (u_base + w_shp_base @ alpha_shpg + w_exp_base @ alpha_expg) \
            .view(N, -1, 3).permute(0, 2, 1) + offsetg
        vertex = p @ (u_base + w_shp_base @ alpha_shp + w_exp_base @ alpha_exp) \
            .view(N, -1, 3).permute(0, 2, 1) + offset

        # NOTE(review): same broadcast assumption as forward_resample —
        # weight_68 (68,) requires resample_num == 0 here.
        dif = (gt_vertex-vertex)*self.weight_68

        if self.dif_s == 'l1':
            dif = torch.abs(dif)
        else:
            dif = dif ** 2
        loss = dif.mean() * self.scale

        return loss

    def forward(self, input, target):
        """Dispatch to the loss variant selected by ``opt_style``.

        Only the first 240 parameter columns (12 pose + 199 shape + 29
        expression) participate in the loss; any trailing columns are
        ignored.
        """
        input = input[:, :240]
        target = target[:, :240]
        if self.opt_style == 'all':
            return self.forward_all(input, target)
        elif self.opt_style == 'resample':
            return self.forward_resample(input, target)
        elif self.opt_style == 'emphasize':
            return self.forward_all(input, target) * self.weight[0] + \
                   self.forward_resample(input, target) * self.weight[1]
        elif self.opt_style == 'kpt':
            return self.forward_kpt(input, target)
        else:
            raise Exception(f'Unknown opt style: {self.opt_style}')


# Import-only module: no standalone demo or CLI behavior when run directly.
if __name__ == '__main__':
    pass