import math
import numpy as np
import torch
import torch.nn as nn
import torchvision.models.resnet as resnet
import torchgeometry
import selfcontact.losses
import shapely.geometry
import trimesh
import cv2
import PIL.Image as Image
from skimage import measure
from tqdm import tqdm

import dataset.utils
import dataset.extract
import spin
import pose_estimation
import losses

import random
import save
from pathlib import Path


# Mapping from pose-estimation keypoint names (pose_estimation.KPS) to the
# corresponding joint names in spin.JOINT_NAMES; used by get_selector() to
# align the two joint orderings.
# NOTE(review): both "Shoulder" entries map to "ForeArm" joints — looks
# deliberate, but worth confirming against the joint definitions.
PE_KSP_TO_SPIN = {
    "Head": "Head",
    "Neck": "Neck",
    "Right Shoulder": "Right ForeArm",
    "Right Arm": "Right Arm",
    "Right Hand": "Right Hand",
    "Left Shoulder": "Left ForeArm",
    "Left Arm": "Left Arm",
    "Left Hand": "Left Hand",
    "Spine": "Spine1",
    "Hips": "Hips",
    "Right Upper Leg": "Right Upper Leg",
    "Right Leg": "Right Leg",
    "Right Foot": "Right Foot",
    "Left Upper Leg": "Left Upper Leg",
    "Left Leg": "Left Leg",
    "Left Foot": "Left Foot",
    "Left Toe": "Left Toe",
    "Right Toe": "Right Toe",
}


def rot6d_to_rotmat(x):
    """Convert a 6D rotation representation to a 3x3 rotation matrix.

    Based on Zhou et al., "On the Continuity of Rotation Representations
    in Neural Networks", CVPR 2019.

    Args:
        x: (B, 6) batch of 6D rotation representations.

    Returns:
        (B, 3, 3) batch of corresponding rotation matrices.
    """
    x = x.view(-1, 3, 2)
    a1 = x[:, :, 0]
    a2 = x[:, :, 1]
    # Gram-Schmidt: orthonormalize a1/a2, then complete the right-handed basis.
    b1 = nn.functional.normalize(a1)
    b2 = nn.functional.normalize(
        a2 - torch.einsum("bi,bi->b", b1, a2).unsqueeze(-1) * b1
    )
    # Pass dim explicitly: without it torch.cross uses the *first* dimension
    # of size 3, which is the batch dimension whenever B == 3 — silently
    # producing wrong rotations for that batch size.
    b3 = torch.cross(b1, b2, dim=1)

    return torch.stack((b1, b2, b3), dim=-1)


def rotation_matrix_to_angle_axis(rotmat):
    """Convert batched joint rotation matrices to flattened angle-axis.

    torchgeometry expects 3x4 matrices, so a dummy [0, 0, 1]^T translation
    column is appended before conversion.

    Args:
        rotmat: (bs, n_joints, 3, 3) batch of rotation matrices.

    Returns:
        (bs, 3 * n_joints) angle-axis vectors.
    """
    bs, n_joints, *_ = rotmat.size()
    # The original code viewed the filler column as (bs, 3, 1) and expanded it
    # to n_joints rows, which is only shape-consistent for bs == 1; expand to
    # the full bs * n_joints flattened batch instead.
    filler = (
        rotmat.new_tensor([0, 0, 1], dtype=torch.float32)
        .view(1, 3, 1)
        .expand(bs * n_joints, -1, -1)
    )
    rotmat = torch.cat([rotmat.view(-1, 3, 3), filler], dim=-1)
    aa = torchgeometry.rotation_matrix_to_angle_axis(rotmat)
    aa = aa.reshape(bs, 3 * n_joints)

    return aa
    

def get_selector():
    """Return SPIN joint indices ordered to match the pose-estimation keypoints."""
    return [
        spin.JOINT_NAMES.index(PE_KSP_TO_SPIN[kp_name])
        for kp_name in pose_estimation.KPS
    ]


def get_smpl_output(smpl, rotmat, betas, use_betas=True, zero_hands=False):
    """Run the SMPL / SMPL-X body model on a batch of joint rotations.

    For plain SMPL the rotation matrices are fed directly (pose2rot=False);
    for SMPL-X they are converted to angle-axis first, optionally zeroing
    the wrist rotations and the neck/head y-rotation.

    Returns:
        (model output, the possibly converted pose tensor)
    """
    model_name = smpl.name()

    if model_name == "SMPL":
        smpl_output = smpl(
            betas=betas if use_betas else None,
            body_pose=rotmat[:, 1:],
            global_orient=rotmat[:, 0].unsqueeze(1),
            pose2rot=False,
        )
        return smpl_output, rotmat

    if model_name == "SMPL-X":
        rotmat = rotation_matrix_to_angle_axis(rotmat)
        if zero_hands:
            # Zero the wrist joints entirely (indices 20, 21).
            for joint in (20, 21):
                rotmat[:, 3 * joint : 3 * (joint + 1)] = 0
            # Zero only the y component for neck (12) and head (15).
            for joint in (12, 15):
                rotmat[:, 3 * joint + 1] = 0
        smpl_output = smpl(
            betas=betas,
            body_pose=rotmat[:, 3:],
            global_orient=rotmat[:, :3],
            pose2rot=True,
        )
        return smpl_output, rotmat

    raise NotImplementedError


def project_and_normalize_to_spin(vertices_3d, camera):
    """Apply a weak-perspective camera and map to SPIN pixel coordinates.

    All three coordinates are transformed (z is kept so callers can still
    read depth from the result).

    Args:
        vertices_3d: (N, 3) points.
        camera: (3,) tensor [scale, tx, ty].

    Returns:
        (N, 3) points scaled to the SPIN image resolution.
    """
    # The original also assigned `translate = camera[1:]` here, which was
    # immediately overwritten — removed as dead code.
    scale = camera[0]
    translate = scale.new_zeros(3)
    translate[:2] = camera[1:]  # translation acts on x/y only; z offset is 0

    projected = vertices_3d + translate
    projected = scale * projected + 1
    projected = spin.constants.IMG_RES / 2 * projected

    return projected


def project_and_normalize_to_spin_legs(vertices_3d, A, camera):
    """Project the left/right upper-leg bone vectors to SPIN image scale.

    Args:
        vertices_3d: tensor used only as a template (device/dtype) for the
            constant matrices below.
        A: pair (A, J) from the SMPL forward pass — per-joint transforms and
            joint locations; the leading batch dimension is dropped.
        camera: weak-perspective camera; only camera[0] (scale) is used.

    Returns:
        (rleg, lleg): 2D (x, y) leg direction vectors scaled to the SPIN
        image resolution.
    """
    A, J = A
    A = A[0]  # drop batch dimension
    J = J[0]
    # Fixed corrective rotations for the left/right legs.
    # NOTE(review): these constants look precomputed/calibrated; their
    # derivation is not documented here — confirm before changing.
    L = vertices_3d.new_tensor(
        [
            [0.98619063, 0.16560926, 0.00127302],
            [-0.16560601, 0.98603675, 0.01749799],
            [0.00164258, -0.01746717, 0.99984609],
        ]
    )
    R = vertices_3d.new_tensor(
        [
            [0.9910211, -0.13368178, -0.0025208],
            [0.13367888, 0.99027076, 0.03864949],
            [-0.00267045, -0.03863944, 0.99924965],
        ]
    )
    scale = camera[0]
    # Compose the constants with the joints' global rotations.
    R = A[2, :3, :3] @ R  # 2 - right
    L = A[1, :3, :3] @ L  # 1 - left
    # Bone vectors from joint differences.
    # NOTE(review): presumably knee minus hip in the model's joint ordering
    # (indices 5-2 right, 4-1 left) — confirm against the SMPL joint list.
    r = J[5] - J[2]
    l = J[4] - J[1]

    rleg = scale * spin.constants.IMG_RES / 2 * R @ r
    lleg = scale * spin.constants.IMG_RES / 2 * L @ l

    # Keep only the in-image (x, y) components.
    rleg = rleg[:2]
    lleg = lleg[:2]

    return rleg, lleg


def get_real_and_data(smpl, selector, axis, betas, cam, use_betas=True, zero_hands=False):
    """Run the body model on ground-truth angle-axis pose and project to 2D.

    The returned tuple mirrors get_pred_and_data: pose, betas, camera,
    selected 2D joints, selected 3D joints, 2D vertices, the raw SMPL
    output, the projected leg vectors, and all (unselected) 2D joints.
    (use_betas / zero_hands are accepted for signature symmetry but unused.)
    """
    smpl_output = smpl(
        betas=betas,
        body_pose=axis[:, 3:],
        global_orient=axis[:, :3],
        pose2rot=True,
    )
    joints = smpl_output.joints.squeeze(0)
    cam = cam.squeeze(0)

    joints_2d_all = project_and_normalize_to_spin(joints, cam)
    rleg, lleg = project_and_normalize_to_spin_legs(joints, smpl_output.A, cam)

    vertices_2d = project_and_normalize_to_spin(
        smpl_output.vertices.squeeze(0), cam
    )

    return (
        axis,
        betas,
        cam,
        joints_2d_all[selector],
        joints[selector],
        vertices_2d,
        smpl_output,
        (rleg, lleg),
        joints_2d_all,
    )


def get_pred_and_data(smpl, selector, rotmat, betas, camera, use_betas=True, zero_hands=False):
    """Run the body model on predicted rotations and project to 2D.

    Args:
        smpl: SMPL or SMPL-X model.
        selector: joint indices aligning SPIN joints with pose-estimation keypoints.
        rotmat: predicted joint rotation matrices (batched, batch size 1).
        betas: predicted shape parameters.
        camera: weak-perspective camera [scale, tx, ty] (batched).
        use_betas / zero_hands: forwarded to get_smpl_output.

    Returns:
        (pose, betas, camera, selected 2D joints, selected 3D joints,
         2D vertices, smpl output, (right leg, left leg), all 2D joints),
        with batch dimensions squeezed.
    """
    # Predictions. Forward use_betas / zero_hands by keyword: the previous
    # code passed `camera` positionally here, which bound it to the
    # `use_betas` parameter of get_smpl_output and silently dropped both
    # flags (and would crash on the plain-SMPL path when truth-testing a
    # multi-element tensor).
    smpl_output, rotmat = get_smpl_output(
        smpl, rotmat, betas, use_betas=use_betas, zero_hands=zero_hands
    )

    rotmat = rotmat.squeeze(0)
    betas = betas.squeeze(0)
    camera = camera.squeeze(0)
    z = smpl_output.joints.squeeze(0)

    # data
    joints = smpl_output.joints.squeeze(0)
    joints_2d = project_and_normalize_to_spin(joints, camera)
    rleg, lleg = project_and_normalize_to_spin_legs(joints, smpl_output.A, camera)
    joints_2d_orig = joints_2d
    joints_2d = joints_2d[selector]

    vertices = smpl_output.vertices.squeeze(0)
    vertices_2d = project_and_normalize_to_spin(vertices, camera)

    z = z[selector]

    return (
        rotmat,
        betas,
        camera,
        joints_2d,
        z,
        vertices_2d,
        smpl_output,
        (rleg, lleg),
        joints_2d_orig,
    )


def get_contact_heatmap(model_contact, img, thresh=0.5):
    """Infer a contact heatmap for an image; threshold and visualize it.

    Returns:
        (uint8 binary mask thresholded at `thresh` after min-max scaling,
         3-channel uint8 visualization of the normalized heatmap,
         the raw unnormalized heatmap).
    """
    heatmap = pose_estimation.infer_single_image(
        model_contact,
        img,
        input_img_size=(192, 256),
        return_kps=False,
    ).squeeze(0)
    heatmap_raw = heatmap.copy()

    # Min-max normalize to [0, 1] before thresholding.
    lo, hi = heatmap.min(), heatmap.max()
    heatmap = (heatmap - lo) / (hi - lo)
    binary_mask = ((heatmap > thresh) * 255).astype("uint8")

    # Grayscale -> 3-channel uint8 image for visualization.
    visualization = np.repeat(heatmap[..., None], repeats=3, axis=-1)
    visualization = (visualization * 255).astype("uint8")

    return binary_mask, visualization, heatmap_raw


def normalize_keypoints_to_spin(keypoints_2d, img_size):
    """Map keypoints from an (h, w) image into SPIN's square crop.

    Returns the normalized keypoints plus the shift, scale and shifted axis
    index needed to invert the mapping (see unnormalize_keypoints_from_spin).
    """
    h, w = img_size
    # Axis selection follows image orientation (portrait vs landscape).
    shift_axis = 0 if h > w else 1
    other_axis = 1 - shift_axis

    shift = (img_size[other_axis] - img_size[shift_axis]) / 2
    scale = spin.constants.IMG_RES / img_size[shift_axis]

    normalized = np.copy(keypoints_2d)
    normalized[:, shift_axis] -= shift
    normalized *= scale

    return normalized, shift, scale, shift_axis


def unnormalize_keypoints_from_spin(keypoints_2d, shift, scale, ax2):
    """Invert normalize_keypoints_to_spin: undo the scale, then the shift.

    The input array is copied, not modified in place.
    """
    restored = np.copy(keypoints_2d)
    restored /= scale
    restored[:, ax2] += shift

    return restored


def get_vertices_in_heatmap(contact_heatmap):
    """Turn the heatmap's connected components into 2D convex hulls.

    Each nonzero connected component is converted to SPIN-normalized
    integer point coordinates and wrapped in a shapely convex hull.
    """
    heatmap_size = contact_heatmap.shape[:2]
    labels = measure.label(contact_heatmap)

    hulls = []
    for component in range(1, labels.max() + 1):
        # np.nonzero yields (rows, cols); reverse to (x, y) point pairs.
        points = np.vstack(np.nonzero(labels == component)[::-1]).T.astype("float")
        points_scaled, *_ = normalize_keypoints_to_spin(points, heatmap_size)
        int_points = torch.from_numpy(points_scaled).int().tolist()
        hulls.append(shapely.geometry.MultiPoint(int_points).convex_hull)

    return hulls


def discretize(parametrization, n_bins=100):
    """Snap values in [0, 1] down to the edges of a uniform n_bins grid."""
    edges = np.linspace(0, 1, n_bins + 1)
    bin_indices = np.digitize(parametrization, edges)

    return edges[bin_indices - 1]


def get_mapping_from_params_to_verts(verts, params):
    """Group vertex ids by their (discretized) parameter value.

    Returns a dict param -> list of vertex ids, preserving input order.
    """
    mapping = {}
    for vert, param in zip(verts, params):
        if param not in mapping:
            mapping[param] = []
        mapping[param].append(vert)

    return mapping


def find_contacts(y_data_conts, keypoints_2d, bone_to_params, thresh=12, step=0.0072246375):
    """Match 2D contact regions to mesh vertices along skeleton bones.

    For every contact hull, each skeleton bone segment is intersected with
    the hull buffered by `thresh` pixels; the resulting parameter interval
    on the bone is discretized and looked up in the bone's vertex
    parametrization to collect candidate contact vertices.

    Args:
        y_data_conts: shapely geometries (convex hulls) of detected 2D
            contact regions.
        keypoints_2d: 2D keypoints indexed by pose_estimation.SKELETON.
        bone_to_params: dict mapping a bone (i, j) to (vertex ids, 1D
            parameters of those vertices along the bone).
        thresh: buffer radius (in the 2D coordinate units) grown around
            each contact region.
        step: bin width for discretizing bone parameters (per the inline
            note, the mean face's circumradius).

    Returns:
        contact: list (one per region) of sorted unique vertex-id arrays.
        contact_2d: list (one per region) of (i, j, t) tuples — the bone
            and the midpoint parameter of its 2D intersection interval.
        for_mask: outlines of regions that matched at least one bone, as
            int point arrays in OpenCV contour format.
    """
    n_bins = int(math.ceil(1 / step)) - 1  # mean face's circumradius
    contact = []
    contact_2d = []
    for_mask = []
    for y_data_cont in y_data_conts:
        contact_loc = []
        contact_2d_loc = []
        # Grow the region so bones passing close by still intersect it.
        buffer = y_data_cont.buffer(thresh)
        mask_add = False
        for i, j in pose_estimation.SKELETON:
            verts, t3d = bone_to_params[(i, j)]
            if len(verts) == 0:
                continue

            # Discretize bone parameters and group vertices per bin.
            t3d = discretize(t3d, n_bins=n_bins)
            t3d_to_verts = get_mapping_from_params_to_verts(verts, t3d)
            t3d_to_verts_sorted = sorted(t3d_to_verts.items(), key=lambda x: x[0])
            t3d_sorted_np = np.array([x for x, _ in t3d_to_verts_sorted])

            # Intersect the 2D bone segment with the buffered region.
            line = shapely.geometry.LineString([keypoints_2d[i], keypoints_2d[j]])
            lint = buffer.intersection(line)
            if len(lint.boundary.geoms) < 2:
                continue

            # Normalized [0, 1] parameters of the intersection interval.
            t2d_start = line.project(lint.boundary.geoms[0], normalized=True)
            t2d_end = line.project(lint.boundary.geoms[1], normalized=True)
            assert t2d_start <= t2d_end

            t2ds = discretize(
                np.linspace(t2d_start, t2d_end, n_bins + 1), n_bins=n_bins
            )
            to_add = False
            for t2d in t2ds:
                # Skip samples outside the parametrized span of the bone.
                if t2d < t3d_sorted_np[0] or t2d > t3d_sorted_np[-1]:
                    continue

                t2d_ind = np.searchsorted(t3d_sorted_np, t2d)
                c = t3d_to_verts_sorted[t2d_ind][1]

                contact_loc.extend(c)
                to_add = True
                mask_add = True

                # Also include the neighbouring bins for tolerance.
                if t2d_ind + 1 < len(t3d_to_verts_sorted):
                    c = t3d_to_verts_sorted[t2d_ind + 1][1]
                    contact_loc.extend(c)

                if t2d_ind > 0:
                    c = t3d_to_verts_sorted[t2d_ind - 1][1]
                    contact_loc.extend(c)

            if to_add:
                # Record the bone and the interval midpoint parameter.
                contact_2d_loc.append((i, j, t2d_start + 0.5 * (t2d_end - t2d_start)))

        if mask_add:
            for_mask.append(buffer.exterior.coords.xy)

        contact_loc = sorted(set(contact_loc))
        contact_loc = np.array(contact_loc, dtype="int")
        contact.append(contact_loc)
        contact_2d.append(contact_2d_loc)

    # Convert outlines to int point arrays in OpenCV contour format.
    for_mask = [np.stack((x, y), axis=0).T[:, None].astype("int") for x, y in for_mask]

    return contact, contact_2d, for_mask


def get_contacts(
    conf,
    sc_module,
    y_data_conts,
    keypoints_2d,
    vertices,
    bone_to_params,
    loss_parallel,
):
    """Collect contact vertex indices for the current frame.

    With conf["use_contacts"], contacts come from the detected 2D contact
    regions, falling back to the self-contact module when none are found;
    with conf["use_msc"], they come from the self-contact module only;
    otherwise an empty array is returned.

    Side effect: stores the 2D contact tuples on loss_parallel when any
    were detected.
    """
    use_contacts = conf["use_contacts"]
    use_msc = conf["use_msc"]
    c_mse = conf["c_mse"]

    def _self_contact_verts():
        # Indices of mesh vertices currently in self-contact.
        _, idx = sc_module.verts_in_contact(vertices, return_idx=True)
        return idx.cpu().numpy().ravel()

    if use_contacts:
        assert c_mse == 0
        contact, contact_2d, for_mask = find_contacts(
            y_data_conts, keypoints_2d, bone_to_params
        )
        if len(contact_2d) > 0:
            loss_parallel.contact_2d = contact_2d

        if len(contact) == 0:
            contact = _self_contact_verts()
    elif use_msc:
        contact = _self_contact_verts()
    else:
        contact = np.array([])

    return contact


def get_pred_amass_dataset(axis, batch_size, flip=True):
    """Convert predicted angle-axis pose to discontinuity-free quaternions.

    Only the first 66 DoF (22 joints x 3) are used. `flip` is accepted for
    interface compatibility but unused.
    """
    body_pose = torch.from_numpy(axis[:, :66].reshape(batch_size, -1, 3))
    quats = torchgeometry.angle_axis_to_quaternion(body_pose).numpy()

    return dataset.utils.remove_quat_discontinuities(quats)

    
class Bottleneck(nn.Module):
    """ResNet bottleneck residual block (1x1 reduce, 3x3, 1x1 expand).

    Re-declaration of the torchvision block so ImageNet weights can be
    loaded; attribute names therefore match the official implementation.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # 1x1 channel reduction.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # 3x3 spatial convolution (strided when downsampling).
        self.conv2 = nn.Conv2d(
            planes, planes, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 expansion back to planes * 4 channels.
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity shortcut, projected when shape changes.
        identity = x if self.downsample is None else self.downsample(x)

        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))

        y += identity
        return self.relu(y)


class HMR(nn.Module):
    """SMPL Iterative Regressor with ResNet50 backbone.

    A ResNet50 encoder followed by an iterative error-feedback head that
    refines SMPL pose (6D rotation per joint), shape and weak-perspective
    camera, starting from the registered mean parameters. `forward` also
    computes the training losses via `hmr_loss` (accumulated on the
    instance; see `reset` / `get_loss`), saves debug images/meshes, and
    returns the predicted body pose converted to quaternions.
    """

    def __init__(self, conf, block, layers, smpl_mean_params):
        """Build the backbone, regressor head, SMPL-X model and losses.

        Args:
            conf: configuration dict with model paths and loss weights
                (smpl_model_dir, bone_parametrization_path, foot_inds_path,
                c_mse, c_par, c_f, c_parallel, c_reg, c_cont2d, ...).
            block: residual block class (e.g. Bottleneck).
            layers: number of blocks per ResNet stage, e.g. [3, 4, 6, 3].
            smpl_mean_params: path to the .npz with mean pose/shape/camera.
        """
        self.inplanes = 64
        super(HMR, self).__init__()
        self.n_shape = 10   # SMPL shape coefficients
        self.n_cam = 3      # weak-perspective camera: scale, tx, ty
        self.n_joints = 24  # joints incl. global orientation
        npose = self.n_joints * 6  # 6D rotation representation per joint

        # ResNet50 backbone.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)

        # Iterative error-feedback regressor head.
        self.fc1 = nn.Linear(512 * block.expansion + npose + self.n_shape + self.n_cam, 1024)
        self.drop1 = nn.Dropout()
        self.fc2 = nn.Linear(1024, 1024)
        self.drop2 = nn.Dropout()
        self.decpose = nn.Linear(1024, npose)
        self.decshape = nn.Linear(1024, self.n_shape)
        self.deccam = nn.Linear(1024, self.n_cam)
        # Small-gain init keeps early iterations close to the mean params.
        nn.init.xavier_uniform_(self.decpose.weight, gain=0.01)
        nn.init.xavier_uniform_(self.decshape.weight, gain=0.01)
        nn.init.xavier_uniform_(self.deccam.weight, gain=0.01)

        # Standard ResNet initialization: He init for convs, unit BatchNorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

        # Mean SMPL parameters used to seed the iterative regression.
        mean_params = np.load(smpl_mean_params)
        init_pose = torch.from_numpy(mean_params["pose"][:]).unsqueeze(0)
        init_shape = torch.from_numpy(
            mean_params["shape"][:].astype("float32")
        ).unsqueeze(0)
        init_cam = torch.from_numpy(mean_params["cam"]).unsqueeze(0)
        self.register_buffer("init_pose", init_pose)
        self.register_buffer("init_shape", init_shape)
        self.register_buffer("init_cam", init_cam)

        # SMPL-X body model and precomputed mesh data.
        self.conf = conf
        self.smpl = spin.SMPLX(
            self.conf["smpl_model_dir"],
            batch_size=1,
            create_transl=False,
            use_pca=False,
            flat_hand_mean=False,
        ).to("cuda")
        self.selector = get_selector()
        self.bone_to_params = np.load(self.conf["bone_parametrization_path"], allow_pickle=True).item()
        self.foot_inds = np.load(self.conf["foot_inds_path"], allow_pickle=True).item()
        self.left_foot_inds = self.foot_inds["left_foot_inds"]
        self.right_foot_inds = self.foot_inds["right_foot_inds"]

        # Losses: either plain keypoint MSE (c_mse) or the "parallel" loss
        # family (c_par) is active — never both (asserted below).
        self.loss_mse = losses.MSE([1, 10, 13])  # Neck + Right Upper Leg + Left Upper Leg
        self.loss_parallel = losses.Parallel(
            skeleton=pose_estimation.SKELETON,
            ignore=(
                (1, 2),   # Neck + Right Shoulder
                (1, 5),   # Neck + Left Shoulder
                (9, 10),  # Hips + Right Upper Leg
                (9, 13),  # Hips + Left Upper Leg
            ),
        )
        self.c_mse = self.conf["c_mse"]
        self.c_new_mse = self.conf["c_par"]
        self.c_beta = 1e-3

        if self.c_mse > 0:
            assert self.c_new_mse == 0
        elif self.c_mse == 0:
            assert self.c_new_mse > 0

        # Per-side cached median foot height (see hmr_loss).
        self.mean_zfoot_val = {}

        # Accumulated loss terms (cleared by reset()).
        self.l2 = 0
        self.Ltan = 0
        self.Lcos = 0
        self.Lpar = 0
        self.Lspine = 0
        self.Lgr = 0
        self.Lstraight3d = 0
        self.Lcon2d = 0
        self.loss_foot = 0
        self.loss_sh = 0
        self.msc_loss = 0
        self.Lprior = 0
        self.loss = 0

        # Running index used to name saved debug images/meshes.
        self.name = 0

    def _make_layer(self, block, planes, blocks, stride=1):
        """Create one ResNet stage of `blocks` residual blocks."""
        downsample = None
        # Project the identity when spatial size or channel count changes.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False,
                ),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, batch_sample, i, init_pose=None, init_shape=None, init_cam=None, n_iter=3):
        """Regress SMPL parameters for sample `i`; return pose quaternions.

        Also accumulates training losses on the instance via `hmr_loss`.

        Args:
            batch_sample: dict with at least "img" and "axis" entries
                indexable by `i`.
            i: index of the sample within batch_sample.
            init_pose / init_shape / init_cam: optional starting values;
                default to the registered mean parameters.
            n_iter: number of error-feedback refinement iterations.

        Returns:
            CPU tensor of quaternions for the predicted body pose.
        """
        _, x = spin.process_image(batch_sample["img"][i], input_res=spin.constants.IMG_RES)
        x = x.to("cuda")
        x = x.unsqueeze(0)
        batch_size = x.shape[0]

        if init_pose is None:
            init_pose = self.init_pose.expand(batch_size, -1)
        if init_shape is None:
            init_shape = self.init_shape.expand(batch_size, -1)
        if init_cam is None:
            init_cam = self.init_cam.expand(batch_size, -1)

        # Backbone feature extraction.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x1 = self.layer1(x)
        x2 = self.layer2(x1)
        x3 = self.layer3(x2)
        x4 = self.layer4(x3)

        xf = self.avgpool(x4)
        xf = xf.view(xf.size(0), -1)

        # Iterative error feedback: refine (pose, shape, cam) n_iter times.
        pred_pose = init_pose
        pred_shape = init_shape
        pred_cam = init_cam
        for _ in range(n_iter):
            xc = torch.cat([xf, pred_pose, pred_shape, pred_cam], 1)
            xc = self.fc1(xc)
            xc = self.drop1(xc)
            xc = self.fc2(xc)
            xc = self.drop2(xc)
            pred_pose = self.decpose(xc) + pred_pose
            pred_shape = self.decshape(xc) + pred_shape
            pred_cam = self.deccam(xc) + pred_cam

        pred_rotmat = rot6d_to_rotmat(pred_pose).view(batch_size, self.n_joints, 3, 3)
        # NOTE(review): the predicted betas are discarded and replaced by the
        # zero (mean) shape before the loss — looks intentional; confirm.
        pred_shape = torch.zeros(1, 10, dtype=torch.float32).cuda()
        pred_axis = self.hmr_loss(batch_sample, i, pred_rotmat, pred_shape, pred_cam)
        Q = get_pred_amass_dataset(pred_axis, batch_size)
        return torch.from_numpy(Q)

    def reset(self):
        """Zero all accumulated loss terms."""
        self.l2 = 0
        self.Ltan = 0
        self.Lcos = 0
        self.Lpar = 0
        self.Lspine = 0
        self.Lgr = 0
        self.Lstraight3d = 0
        self.Lcon2d = 0
        self.loss_foot = 0
        self.loss_sh = 0
        self.msc_loss = 0
        self.Lprior = 0
        self.loss = 0

    def hmr_loss(self, batch_sample, index, pred_rotmat, pred_shape, pred_cam):
        """Compute and accumulate the training losses for one sample.

        Runs the body model on both the predicted rotations and the
        ground-truth pose (with the same betas/camera), applies either the
        keypoint MSE (c_mse) or the "parallel" loss family (c_par), adds a
        rotation prior, accumulates each term on the instance, and writes
        debug image/mesh files to results/.

        Returns:
            np.ndarray of the predicted pose (angle-axis) with a leading
            singleton dimension.
        """
        axis_pred = []
        # Predicted side.
        (
            rotmat_pred,
            betas_pred,
            camera_pred,
            keypoints_3d_pred,
            z,
            _,
            smpl_output,
            (rleg, lleg),
            _,
        ) = get_pred_and_data(
            self.smpl,
            self.selector,
            pred_rotmat,
            pred_shape,
            pred_cam,
        )
        keypoints_2d_pred = keypoints_3d_pred[:, :2]
        axis_pred.append(rotmat_pred.cpu().detach().numpy())

        # Ground-truth side (uses the same betas/camera as the prediction).
        (
            rotmat_real,
            betas_real,
            camera_real,
            keypoints_3d_real,
            _,
            _,
            smpl_output_real,
            (_, _),
            _,
        ) = get_real_and_data(
            self.smpl,
            self.selector,
            batch_sample["axis"][index].to(torch.float32).to("cuda"),
            pred_shape,
            pred_cam,
        )
        keypoints_2d_real = keypoints_3d_real[:, :2]

        loss = l2 = 0.0
        if self.c_mse > 0 and self.loss_mse is not None:
            l2 = self.loss_mse(keypoints_2d_pred, keypoints_2d_real)
            # BUGFIX: this previously read `self.c_mse * 12` (the literal
            # twelve — a typo for the variable `l2`), so the computed
            # keypoint loss never influenced the total.
            loss = loss + self.c_mse * l2

            self.l2 += l2

        vertices_pred = smpl_output.vertices
        lpar = loss_sh = 0.0
        if self.c_new_mse > 0 and self.loss_parallel is not None:
            Ltan, Lcos, Lpar, Lspine, Lgr, Lstraight3d, Lcon2d = self.loss_parallel(
                keypoints_3d_pred,
                keypoints_2d_real,
                z,
                (rleg, lleg),
            )
            lpar = (
                Ltan
                + self.c_new_mse * (self.conf["c_f"] * Lcos + self.conf["c_parallel"] * Lpar)
                + Lspine
                + self.conf["c_reg"] * Lgr
                + self.conf["c_reg"] * Lstraight3d
                + self.conf["c_cont2d"] * Lcon2d
            )
            loss = loss + 300 * lpar  # empirical global weight for the parallel terms

            self.Ltan += Ltan
            self.Lcos += Lcos
            self.Lpar += Lpar
            self.Lspine += Lspine
            self.Lgr += Lgr
            self.Lstraight3d += Lstraight3d
            self.Lcon2d += Lcon2d

            # Keep foot vertices near their cached median height per side.
            for side in ["left", "right"]:
                attr = f"{side}_foot_inds"
                if hasattr(self.loss_parallel, attr):
                    foot_inds = getattr(self.loss_parallel, attr)
                    zind = 1  # the y coordinate serves as the height axis here
                    if attr not in self.mean_zfoot_val:
                        with torch.no_grad():
                            self.mean_zfoot_val[attr] = torch.median(
                                vertices_pred[0, foot_inds, zind], dim=0
                            ).values

                    loss_foot = (
                        (vertices_pred[0, foot_inds, zind] - self.mean_zfoot_val[attr])
                        ** 2
                    ).sum()
                    loss = loss + self.conf["c_reg"] * loss_foot

                    self.loss_foot += loss_foot

            # Pull silhouette vertices towards the estimated ground height.
            if hasattr(self.loss_parallel, "silhuette_vertices_inds"):
                inds = self.loss_parallel.silhuette_vertices_inds
                loss_sh = (
                    (vertices_pred[0, inds, 1] - self.loss_parallel.ground) ** 2
                ).sum()
                loss = loss + self.conf["c_reg"] * loss_sh

                self.loss_sh += loss_sh

        # Rotation prior: squared distance between predicted and real pose.
        lprior = ((rotmat_pred - rotmat_real) ** 2).sum()
        self.Lprior += lprior
        loss += lprior

        contact = None
        # Save the input image plus real/predicted meshes for inspection.
        fname = self.name
        self.name = self.name + 1
        cv2.imwrite(str(Path("results") / Path(f"{fname}.jpg")), batch_sample["img"][index].squeeze(0).detach().cpu().numpy())
        save.save_mesh_with_colors(
            smpl_output_real.vertices[0].detach().cpu().numpy(),
            self.smpl.faces,
            Path("results") / Path(f"{fname}_real.ply"),
            inds=contact,
        )
        save.save_mesh_with_colors(
            smpl_output.vertices[0].detach().cpu().numpy(),
            self.smpl.faces,
            Path("results") / Path(f"{fname}_predict.ply"),
            inds=contact,
        )

        self.loss += loss

        return np.asarray(axis_pred)

    def get_loss(self):
        """Return every accumulated loss term, ending with the total."""
        return [
            self.l2,
            self.Ltan,
            self.Lcos,
            self.Lpar,
            self.Lspine,
            self.Lgr,
            self.Lstraight3d,
            self.Lcon2d,
            self.loss_foot,
            self.loss_sh,
            self.msc_loss,
            self.Lprior,
            self.loss,
        ]
        
def hmr(smpl_mean_params, conf, pretrained=True, **kwargs):
    """Construct an HMR model with a ResNet50 backbone.

    Args:
        smpl_mean_params: path to the mean SMPL parameter file.
        conf: configuration dict forwarded to HMR.
        pretrained: if True, initialize the backbone with ImageNet weights
            (non-matching head parameters are skipped via strict=False).
    """
    model = HMR(conf, Bottleneck, [3, 4, 6, 3], smpl_mean_params, **kwargs)
    if not pretrained:
        return model

    backbone = resnet.resnet50(pretrained=True)
    model.load_state_dict(backbone.state_dict(), strict=False)
    return model
