import torch
from torch import nn

from loss.param import *
from loss.param import _numpy_to_cuda

# Local alias: converts a numpy array to a CUDA torch tensor (see loss.param).
_to_tensor = _numpy_to_cuda


def _parse_param_batch(param):
    """Split a batch of 3DMM parameter vectors into their components.

    NOTE(review): the original docstring claimed numpy support, but
    ``.view`` is a torch-only method, so only tensors actually work here.

    Args:
        param: tensor of shape (N, 12 + dim_shp + dim_exp); the first 12
            values per row hold the 3x4 pose matrix in row-major order.

    Returns:
        Tuple ``(p, offset, alpha_shp, alpha_exp)``: the (N, 3, 3)
        rotation/scale part, the (N, 3, 1) translation column, and the
        shape / expression coefficients as (N, -1, 1) columns.
    """
    batch = param.shape[0]
    pose = param[:, :12].view(batch, 3, -1)
    rotation = pose[:, :, :3]
    translation = pose[:, :, -1].view(batch, 3, 1)
    shp_end = 12 + dim_shp
    alpha_shp = param[:, 12:shp_end].view(batch, -1, 1)
    alpha_exp = param[:, shp_end:].view(batch, -1, 1)
    return rotation, translation, alpha_shp, alpha_exp


def min_max_normalization(x, min_val=0.0, max_val=1.0):
    """Linearly rescale a tensor so its values span ``[min_val, max_val]``.

    The global minimum of ``x`` maps to ``min_val`` and the global maximum
    to ``max_val``; normalization is over all elements, not per-row.

    Args:
        x: input tensor of any shape.
        min_val: lower bound of the output range.
        max_val: upper bound of the output range.

    Returns:
        Tensor of the same shape as ``x``. A constant input maps to
        ``min_val`` instead of producing NaN from the 0/0 division the
        original code performed.
    """
    x_min = torch.min(x)
    x_max = torch.max(x)
    span = x_max - x_min
    if span == 0:
        # Degenerate case: all elements equal. Pin them to the lower bound
        # rather than dividing by zero.
        return torch.full_like(x, min_val)
    normalized_x = (x - x_min) / span
    return (max_val - min_val) * normalized_x + min_val


class SWPDLoss(nn.Module):
    """Self-weighted parameter-distance loss for 3DMM parameter regression.

    The (z-score normalized) parameter vector is split into three groups --
    12 pose values, ``dim_shp`` shape coefficients and the remaining
    expression coefficients -- and the loss is a weighted sum of the three
    per-group MSEs.  The group weights can be re-estimated from the 3D
    landmark reconstruction error of each group while "update" mode is
    enabled (see ``update_weight`` and ``forward``).
    """

    def __init__(self, p_w=None):
        """
        Args:
            p_w: optional fixed per-group multipliers [pose, shape, exp];
                defaults to [1.0, 1.0, 1.0] when None.
        """
        super(SWPDLoss, self).__init__()
        # Adaptive mixing weights for the [pose, shape, expression] terms.
        self.weight = [0.8, 0.1, 0.1]
        # Running mean of per-group reconstruction errors, accumulated in
        # forward() while update mode is on.
        self.weight_t = [0.0, 0.0, 0.0]
        # 3DMM data loaded from loss.param (project globals): u = mean shape,
        # w_shp / w_exp = shape / expression bases, param_mean / param_std =
        # statistics used to de-normalize predicted parameters.
        self.u = _to_tensor(u)
        self.param_mean = _to_tensor(mean_std.get('mean')).float()
        self.param_std = _to_tensor(mean_std.get('std')).float()
        self.w_shp = _to_tensor(w_shp).float()
        self.w_exp = _to_tensor(w_exp).float()

        # Restrict the mean shape and bases to the landmark rows only
        # (presumably the 68 facial keypoints -- see weight_68 below).
        self.keypoints = _to_tensor(key_point)
        # self.u_base = self.u[self.keypoints]
        # self.w_shp_base = self.w_shp[self.keypoints]
        # self.w_exp_base = self.w_exp[self.keypoints]
        self.u = self.u[self.keypoints]
        self.w_shp = self.w_shp[self.keypoints]
        self.w_exp = self.w_exp[self.keypoints]

        # Number of 3D points in the truncated basis (3 coords per point).
        self.w_shp_length = self.w_shp.shape[0] // 3
        # self.shape_shp_weight = torch.concatenate(
        #     (
        #         (100 / torch.arange(101, 300)), (100 / torch.arange(101, 130))
        #     )
        # ).cuda()
        # self.shp_weight = (1000 / torch.arange(1001, 1200)).cuda()
        # self.exp_weight = (1000 / torch.arange(1001, 1030)).cuda()

        # When True, forward() also measures per-group reconstruction errors.
        self.update = False
        # self.scale = 0.001
        # Factor applied to the accumulated errors when they are converted
        # into new mixing weights in update_weight(False).
        self.scale = 0.01
        # Weights in effect before the current update round started.
        self.old_weight = None
        # Number of batches folded into the weight_t running means.
        self.count = 0
        # ind_68to21 = [[18], [20], [22], [23], [25], [27], [37], [37, 38, 39, 40, 41, 42], [40], [43],
        #               [43, 44, 45, 46, 47, 48],
        #               [46], [3], [32], [31], [36], [15], [49], [61, 62, 63, 64, 65, 66, 67, 68], [55], [9]]
        # ind_68to21 = [[28, 29, 30, 31, 32, 33, 34, 35, 36], [61, 62, 63, 64, 65, 66, 67, 68]]
        # Per-landmark weights used when measuring reconstruction error;
        # currently uniform (the 68->21 re-weighting below is disabled).
        self.weight_68 = [1.0] * 68
        weight_21 = 2.0
        # for i in range(len(ind_68to21)):
        #     # w_21 = weight_21 / len(ind_68to21[i])
        #     w_21 = weight_21
        #     for j in range(len(ind_68to21[i])):
        #         self.weight_68[ind_68to21[i][j] - 1] = 1.0 * w_21
        self.weight_68 = torch.tensor(self.weight_68).cuda()

        # Fixed per-group multipliers applied on top of the adaptive weights.
        if p_w is None:
            self.p_w = [1.0, 1.0, 1.0]
        else:
            self.p_w = p_w

    def reconstruct_and_parse(self, input, target):
        """De-normalize prediction and target, then split each into its
        (pose, offset, alpha_shp, alpha_exp) components."""
        # reconstruct
        param = input * self.param_std + self.param_mean
        param_gt = target * self.param_std + self.param_mean

        # parse param
        p, offset, alpha_shp, alpha_exp = _parse_param_batch(param)
        pg, offsetg, alpha_shpg, alpha_expg = _parse_param_batch(param_gt)

        return (p, offset, alpha_shp, alpha_exp), (pg, offsetg, alpha_shpg, alpha_expg)

    def update_weight(self, update=True):
        """Toggle error-accumulation mode.

        With ``update=True`` the current weights are saved to ``old_weight``
        and the error accumulators are reset; subsequent forward() calls
        measure the per-group reconstruction errors.  With ``update=False``
        the accumulated mean errors, scaled by ``self.scale``, become the
        new mixing weights.
        """
        self.update = update
        if update:
            self.old_weight = self.weight
            self.weight_t = [0.0, 0.0, 0.0]
        else:
            # sum_ = sum(self.weight_t)
            # self.weight = [self.weight_t[0] / sum_, self.weight_t[1] / sum_, self.weight_t[2] / sum_]
            self.weight = [self.weight_t[0] * self.scale, self.weight_t[1] * self.scale, self.weight_t[2] * self.scale]
        self.count = 0

    def set_weight(self, weight):
        """Overwrite the adaptive mixing weights [pose, shape, exp]."""
        self.weight = weight

    def set_scale(self, scale):
        """Set the error-to-weight conversion factor."""
        self.scale = scale

    def get_weight(self):
        """Return the current mixing weights [pose, shape, exp]."""
        return self.weight

    def get_scale(self):
        """Return the error-to-weight conversion factor."""
        return self.scale

    def set_p_w(self, p_w):
        """Overwrite the fixed per-group multipliers [pose, shape, exp]."""
        self.p_w = p_w

    def forward(self, input, target):
        """Weighted MSE between predicted and target parameter vectors.

        Args:
            input: (N, >=240) predicted normalized 3DMM parameters.
            target: (N, >=240) ground-truth normalized parameters.

        Returns:
            Scalar loss: sum of the pose / shape / expression MSE terms,
            each scaled by its adaptive weight and fixed multiplier.
        """
        # Only the first 240 parameters (12 pose + shape + expression
        # coefficients) are scored -- TODO confirm 240 matches dim_shp layout.
        input = input[:, :240]
        target = target[:, :240]
        if self.update:
            if self.weight is None:
                self.weight = [0.8, 0.1, 0.1]
            if self.weight_t is None:
                self.weight_t = [0.0, 0.0, 0.0]
            # TODO: periodically check whether pose or shape is doing better;
            # the reconstruction below uses the 68 landmark points.
            (p, offset, alpha_shp, alpha_exp), (pg, offsetg, alpha_shpg, alpha_expg) \
                = self.reconstruct_and_parse(input, target)

            N = input.shape[0]
            # Copy the ground-truth last translation row into the prediction
            # so that component does not dominate the pose error.
            # NOTE(review): in-place write on a view of the de-normalized
            # prediction -- confirm the trainer runs this under no_grad.
            offset[:, -1] = offsetg[:, -1]
            gt_face = (self.u + self.w_shp @ alpha_shpg + self.w_exp @ alpha_expg) \
                .view(N, -1, 3).permute(0, 2, 1)
            # face = (self.u + self.w_shp @ alpha_shp + self.w_exp @ alpha_exp) \
            #     .view(N, -1, 3).permute(0, 2, 1)
            # Faces with exactly one predicted component swapped in, to
            # isolate the error contributed by shape / expression alone.
            shp_face = (self.u + self.w_shp @ alpha_shp + self.w_exp @ alpha_expg) \
                .view(N, -1, 3).permute(0, 2, 1)
            exp_face = (self.u + self.w_shp @ alpha_shpg + self.w_exp @ alpha_exp) \
                .view(N, -1, 3).permute(0, 2, 1)
            # See which of pose / shape / expression is worse; the worse one
            # will receive a larger weight.
            pose_w = torch.mean((((p @ gt_face + offset) - (pg @ gt_face + offsetg)) ** 2) * self.weight_68).item()
            shp_w = torch.mean((((pg @ shp_face + offsetg) - (pg @ gt_face + offsetg)) ** 2) * self.weight_68).item()
            exp_w = torch.mean((((pg @ exp_face + offsetg) - (pg @ gt_face + offsetg)) ** 2) * self.weight_68).item()
            self.count += 1

            # Incremental running mean of the per-group errors.
            weights = [pose_w, shp_w, exp_w]
            for i in range(3):
                self.weight_t[i] += (weights[i] - self.weight_t[i]) / self.count

        # pose_weight = pose_w / (pose_w + shape_w)
        # shape_weight = shape_w / (pose_w + shape_w)

        pose_loss = torch.mean((input[:, :12] - target[:, :12]) ** 2)
        shp_loss = torch.mean(((input[:, 12:12 + dim_shp] - target[:, 12:12 + dim_shp]) ** 2))
        # * self.shp_weight)
        exp_loss = torch.mean(((input[:, 12 + dim_shp:] - target[:, 12 + dim_shp:]) ** 2))
        # * self.exp_weight)
        loss = pose_loss * self.weight[0] * self.p_w[0] \
               + shp_loss * self.weight[1] * self.p_w[1] \
               + exp_loss * self.weight[2] * self.p_w[2]

        return loss
