from facenet_pytorch import MTCNN
from core.options import ImageFittingOptions
import cv2
import face_alignment
import numpy as np
from core import get_recon_model
import os
import torch
import core.utils as utils
from tqdm import tqdm
import core.losses as losses
from utril.solexFacePoints import get_points68
def get_face_bbox(img_arr, orig_w, orig_h, padding_ratio=None):
    """Detect a face in ``img_arr`` with MTCNN and return a padded bbox.

    Args:
        img_arr: RGB image array of shape (H, W, 3).
        orig_w: original image width, used to clamp the padded bbox.
        orig_h: original image height, used to clamp the padded bbox.
        padding_ratio: padding applied around the raw detection. Defaults to
            ``None``, in which case the module-level ``args.padding_ratio`` is
            read (preserves the original behaviour, which only works when the
            script is run through the ``__main__`` guard).

    Returns:
        The padded bbox ``[x1, y1, x2, y2]`` of the first detected face, or
        ``None`` when no face is found.
    """
    if padding_ratio is None:
        # Original code read the global ``args`` directly; keep that as a
        # fallback so existing call sites remain unchanged.
        padding_ratio = args.padding_ratio
    mtcnn = MTCNN(select_largest=False)
    # detect the face using MTCNN
    bboxes, probs = mtcnn.detect(img_arr)
    if bboxes is None:
        print('no face detected')
        return None
    bbox = utils.pad_bbox(bboxes[0], (orig_w, orig_h), padding_ratio)
    face_w = bbox[2] - bbox[0]
    face_h = bbox[3] - bbox[1]
    # pad_bbox is expected to produce a square crop; fail loudly otherwise.
    assert face_w == face_h
    return bbox
def get_face_points(resized_face_img):
    """Detect 68 facial landmarks and return their 2D (x, y) coordinates.

    Args:
        resized_face_img: RGB face crop, already resized to the target size.

    Returns:
        numpy array of shape (1, 68, 2): x/y positions of the first detected
        face's landmarks, with a leading batch axis.
    """
    detector = face_alignment.FaceAlignment(
        face_alignment.LandmarksType.THREE_D, device="cpu", flip_input=False)
    landmarks = detector.get_landmarks_from_image(resized_face_img)[0]
    # Drop the z coordinate and prepend a batch dimension.
    return landmarks[:, :2][None, ...]
def show_points():
    """Placeholder for a landmark-visualisation helper; intentionally a no-op."""
def tran_points_index(potlist):
    """Reorder 68 landmarks from the detector's ordering to the model's.

    Args:
        potlist: indexable sequence of 68 landmark points in the ordering
            produced by ``get_points68``.

    Returns:
        list: the same points permuted so that output position ``k`` holds
        ``potlist[TRAN_IDX[k]]`` (the ordering expected by the fitting losses).
    """
    # Permutation table: output position -> input index.
    tran_idx = (
        16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
        56, 57, 58, 59, 60, 65, 64, 63, 62, 61,
        17, 18, 19, 20, 21, 22, 23, 24, 25,
        47, 46, 45, 44, 49, 48, 50, 51, 52, 53, 54, 55,
        32, 31, 30, 29, 28, 27, 26, 37, 36, 35, 34, 33,
        66, 41, 42, 43, 67, 38, 39, 40,
    )
    return [potlist[i] for i in tran_idx]
def fit(args):
    """Fit a 3DMM face reconstruction to a center/left/right image triplet.

    Pipeline as implemented below:
      1. Load the three images, crop a hard-coded face region, resize to
         ``args.tar_size``.
      2. Detect 68 landmarks per view (``get_points68``) and reorder them
         with ``tran_points_index``.
      3. Rigid fitting: optimize rotation + translation against the center
         view's landmarks only (left/right landmark losses are disabled).
      4. Non-rigid fitting: optimize id/exp/gamma/tex/rot/trans against all
         three views' landmarks plus photometric and regularisation losses.
      5. Render the result, compose it back into the original image, and
         save the composed image and the reconstructed mesh (.obj).

    Args:
        args: parsed ``ImageFittingOptions`` carrying image paths, loss
            weights, iteration counts, target size and device.

    Side effects: opens cv2 debug windows (blocking ``waitKey`` once), and
    writes output files into ``args.res_folder``.
    """
    # init face detection and lms detection models
    print('loading models')
    # recon_model = get_recon_model(model=args.recon_model,
    #                               device=args.device,
    #                               batch_size=1,
    #                               img_size=args.tar_size)
    recon_model = get_recon_model(model=args.recon_model,
                                  batch_size=1,
                                  img_size=args.tar_size)
    print('loading images')
    # cv2 loads BGR; the [:, :, ::-1] slice converts to RGB.
    img_arr = cv2.imread(args.img_path)[:, :, ::-1]
    img_arr_left = cv2.imread(args.img_path_left)[:, :, ::-1]
    img_arr_rgiht = cv2.imread(args.img_path_right)[:, :, ::-1]
    orig_h, orig_w = img_arr.shape[:2]
    print('image is loaded. width: %d, height: %d' % (orig_w, orig_h))
    # NOTE(review): the face bbox is hard-coded instead of calling
    # get_face_bbox(); assumes all three images share this crop region and
    # are large enough — confirm for new input data.
    bbox=[0,1500,6000,7500]
    face_w = bbox[2] - bbox[0]
    face_h = bbox[3] - bbox[1]
    # Crop the same [y1:y2, x1:x2] face region from each view.
    img_cut=img_arr[bbox[1]:bbox[3],bbox[0]:bbox[2]]
    img_cut_left=img_arr_left[bbox[1]:bbox[3],bbox[0]:bbox[2]]
    img_cut_right=img_arr_rgiht[bbox[1]:bbox[3],bbox[0]:bbox[2]]

    resized_face_img = cv2.resize(img_cut, (args.tar_size, args.tar_size))
    resized_face_img_left = cv2.resize(img_cut_left, (args.tar_size, args.tar_size))
    resized_face_img_right = cv2.resize(img_cut_right, (args.tar_size, args.tar_size))

    # 68-point landmark detection per view (project-specific detector).
    result=get_points68(resized_face_img)
    result_left = get_points68(resized_face_img_left)
    result_right = get_points68(resized_face_img_right)

    # Reorder landmarks to the model's convention; shape becomes (1, 68, 2).
    lmmm=np.array([tran_points_index(result["landmarks"])])
    lmmm_l = np.array([tran_points_index(result_left["landmarks"])])
    lmmm_r = np.array([tran_points_index(result_right["landmarks"])])

    # Debug montage: left | center | right, side by side.
    resized_face_img_show=np.hstack((resized_face_img_left.copy(), resized_face_img.copy(), resized_face_img_right.copy()))
    idx=0
    for solexpot,solexpot_left,solexpot_right in zip(result["landmarks"],result_left["landmarks"],result_right["landmarks"]):
        x1,y1=solexpot
        x2, y2 = solexpot_left
        x3, y3 = solexpot_right
        # NOTE(review): the +256/+512 x-offsets assume tar_size == 256 when
        # drawing onto the hstacked montage — confirm against args.tar_size.
        cv2.circle(resized_face_img_show, (int(x2), int(y2)), 3, (255, 0, 0), -1)
        cv2.circle(resized_face_img_show, (int(x1)+256, int(y1)), 3, (255, 0, 0), -1)
        cv2.circle(resized_face_img_show, (int(x3)+512, int(y3)), 3, (255, 0, 0), -1)
        # cv2.putText(resized_face_img_show, str(idx), (int(x2), int(y2)), cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 0, 0), 1)
        # cv2.putText(resized_face_img_show, str(idx), (int(x1)+256, int(y1)), cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 0, 0), 1)
        # cv2.putText(resized_face_img_show, str(idx), (int(x3)+512, int(y3)), cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 0, 0), 1)
        idx+=1
    # Blocking preview of detected landmarks; press any key to continue.
    cv2.imshow("resized_face_img_show", resized_face_img_show)
    cv2.waitKey()

    # Landmark targets as float tensors on the fitting device.
    lms = torch.tensor(lmmm, dtype=torch.float32, device=args.device)
    lms_l = torch.tensor(lmmm_l, dtype=torch.float32, device=args.device)
    lms_r = torch.tensor(lmmm_r, dtype=torch.float32, device=args.device)

    # Center view as a (1, H, W, 3) float tensor for the photometric loss.
    img_tensor = torch.tensor(resized_face_img[None, ...], dtype=torch.float32, device=args.device)

    print('landmarks detected.')

    lm_weights = utils.get_lm_weights(args.device)
    print('start rigid fitting')
    # Stage 1: pose only (rotation + translation).
    rigid_optimizer = torch.optim.Adam([recon_model.get_rot_tensor(),
                                        recon_model.get_trans_tensor()],
                                       lr=args.rf_lr)
    for i in tqdm(range(args.first_rf_iters)):
        print(i)
        rigid_optimizer.zero_grad()
        # render=False: only landmark projections are needed in this stage.
        pred_dict = recon_model(recon_model.get_packed_tensors(), render=False)
        # Per-iteration debug overlay of projected landmarks (center view).
        copy_img=resized_face_img_show.copy()
        for point in pred_dict["lms_proj"][0]:
            x1,y1=point
            cv2.circle(copy_img, (int(x1)+256, int(y1)), 3, (0, 255, 0), -1)
        # for point,point_left,point_right in zip(pred_dict["lms_proj"][0],pred_dict["lms_proj_l"][0],pred_dict["lms_proj_r"][0]):
        #     x1,y1=point
        #     x2,y2=point_left
        #     x3,y3=point_right
        #     cv2.circle(copy_img, (int(x2), int(y2)), 3, (0, 255, 0), -1)
        #     cv2.circle(copy_img, (int(x1)+256, int(y1)), 3, (0, 255, 0), -1)
        #     cv2.circle(copy_img, (int(x3)+512, int(y3)), 3, (0, 255, 0), -1)
        cv2.imshow("resized_face_img_show", copy_img)
        cv2.waitKey(1)
        lm_loss_val = losses.lm_loss(pred_dict['lms_proj'], lms, lm_weights, img_size=args.tar_size)
        # Left/right landmark losses deliberately disabled for rigid fitting.
        lm_loss_val_l,lm_loss_val_r=0,0
        # lm_loss_val_l = losses.lm_loss(pred_dict['lms_proj_l'], lms_l, lm_weights, img_size=args.tar_size)
        # lm_loss_val_r = losses.lm_loss(pred_dict['lms_proj_r'], lms_r, lm_weights, img_size=args.tar_size)
        total_loss = args.lm_loss_w * (lm_loss_val+lm_loss_val_l+lm_loss_val_r)
        # total_loss = args.lm_loss_w * lm_loss_val
        print("lm_loss_val:{}".format(lm_loss_val.detach().cpu().numpy()))
        print("total_loss:{}".format(total_loss))
        total_loss.backward()
        rigid_optimizer.step()


    print('done rigid fitting. lm_loss: %f' %lm_loss_val.detach().cpu().numpy())
    print('start non-rigid fitting')
    # Stage 2: all coefficients (identity, expression, lighting, texture, pose).
    nonrigid_optimizer = torch.optim.Adam(
        [recon_model.get_id_tensor(), recon_model.get_exp_tensor(),
         recon_model.get_gamma_tensor(), recon_model.get_tex_tensor(),
         recon_model.get_rot_tensor(), recon_model.get_trans_tensor()], lr=args.nrf_lr)
    for i in tqdm(range(args.first_nrf_iters)):

        nonrigid_optimizer.zero_grad()

        # render=True: rendered image is needed for the photometric loss.
        pred_dict = recon_model(recon_model.get_packed_tensors(), render=True)
        rendered_img = pred_dict['rendered_img']
        lms_proj = pred_dict['lms_proj']
        lms_proj_l = pred_dict['lms_proj_l']
        lms_proj_r = pred_dict['lms_proj_r']
        face_texture = pred_dict['face_texture']
        # Debug overlay: projected landmarks for all three views.
        copy_img = resized_face_img_show.copy()
        for point,point_left,point_right in zip(pred_dict["lms_proj"][0],pred_dict["lms_proj_l"][0],pred_dict["lms_proj_r"][0]):
            x1,y1=point
            x2,y2=point_left
            x3,y3=point_right
            cv2.circle(copy_img, (int(x2), int(y2)), 3, (0, 255, 0), -1)
            cv2.circle(copy_img, (int(x1)+256, int(y1)), 3, (0, 255, 0), -1)
            cv2.circle(copy_img, (int(x3)+512, int(y3)), 3, (0, 255, 0), -1)
        cv2.imshow("resized_face_img_show", copy_img)
        cv2.waitKey(1)
        # Alpha channel of the render marks face pixels; detached so the
        # mask itself is not optimized.
        mask = rendered_img[:, :, :, 3].detach()

        photo_loss_val = losses.photo_loss(
            rendered_img[:, :, :, :3], img_tensor, mask > 0)

        # Landmark losses for all three views contribute in this stage.
        lm_loss_val = losses.lm_loss(lms_proj, lms, lm_weights,
                                     img_size=args.tar_size)
        lm_loss_val_l = losses.lm_loss(lms_proj_l, lms_l, lm_weights,
                                     img_size=args.tar_size)
        lm_loss_val_r = losses.lm_loss(lms_proj_r, lms_r, lm_weights,
                                     img_size=args.tar_size)
        # L2 regularisers keep coefficients near the model prior.
        id_reg_loss = losses.get_l2(recon_model.get_id_tensor())
        exp_reg_loss = losses.get_l2(recon_model.get_exp_tensor())
        tex_reg_loss = losses.get_l2(recon_model.get_tex_tensor())
        tex_loss_val = losses.reflectance_loss(
            face_texture, recon_model.get_skinmask())

        # Weighted sum of all loss terms (weights come from the options).
        loss = (lm_loss_val+lm_loss_val_l+lm_loss_val_r)*args.lm_loss_w + \
            id_reg_loss*args.id_reg_w + \
            exp_reg_loss*args.exp_reg_w + \
            tex_reg_loss*args.tex_reg_w + \
            tex_loss_val*args.tex_w + \
            photo_loss_val*args.rgb_loss_w

        loss.backward()
        nonrigid_optimizer.step()

    # Summarise the final per-term losses from the last iteration.
    loss_str = ''
    loss_str += 'lm_loss: %f\t' % lm_loss_val.detach().cpu().numpy()
    loss_str += 'photo_loss: %f\t' % photo_loss_val.detach().cpu().numpy()
    loss_str += 'tex_loss: %f\t' % tex_loss_val.detach().cpu().numpy()
    loss_str += 'id_reg_loss: %f\t' % id_reg_loss.detach().cpu().numpy()
    loss_str += 'exp_reg_loss: %f\t' % exp_reg_loss.detach().cpu().numpy()
    loss_str += 'tex_reg_loss: %f\t' % tex_reg_loss.detach().cpu().numpy()
    print('done non rigid fitting.', loss_str)

    # Final render + export; no gradients needed past this point.
    with torch.no_grad():
        coeffs = recon_model.get_packed_tensors()
        pred_dict = recon_model(coeffs, render=True)
        rendered_img = pred_dict['rendered_img']
        rendered_img = rendered_img.cpu().numpy().squeeze()
        out_img = rendered_img[:, :, :3].astype(np.uint8)
        out_mask = (rendered_img[:, :, 3] > 0).astype(np.uint8)
        # Resize the render and its mask back to the original crop size.
        resized_out_img = cv2.resize(out_img, (face_w, face_h))
        # NOTE(review): cv2.INTER_NEAREST is passed positionally here, which
        # lands in cv2.resize's ``dst`` parameter, not ``interpolation`` —
        # likely intended as interpolation=cv2.INTER_NEAREST; confirm.
        resized_mask = cv2.resize(
            out_mask, (face_w, face_h), cv2.INTER_NEAREST)[..., None]

        # Alpha-blend the rendered face back into the full-size image.
        composed_img = img_arr.copy()
        composed_face = composed_img[bbox[1]:bbox[3], bbox[0]:bbox[2], :] * \
            (1 - resized_mask) + resized_out_img * resized_mask
        composed_img[bbox[1]:bbox[3], bbox[0]:bbox[2], :] = composed_face

        utils.mymkdirs(args.res_folder)
        # NOTE(review): [:-4] assumes a 3-character extension (e.g. ".jpg");
        # os.path.splitext would be safer — left unchanged here.
        basename = os.path.basename(args.img_path)[:-4]
        # save the composed image
        out_composed_img_path = os.path.join(
            args.res_folder, basename + '_composed_img.jpg')
        # Convert RGB back to BGR for cv2.imwrite.
        cv2.imwrite(out_composed_img_path, composed_img[:, :, ::-1])
        # save the coefficients (model parameters) — currently disabled
        # out_coeff_path = os.path.join(
        #     args.res_folder, basename + '_coeffs.npy')
        # np.save(out_coeff_path,

        #         coeffs.detach().cpu().numpy().squeeze())

        # save the mesh into obj format
        out_obj_path = os.path.join(
            args.res_folder, basename+'_mesh.obj')
        vs = pred_dict['vs'].cpu().numpy().squeeze()
        tri = pred_dict['tri'].cpu().numpy().squeeze()
        color = pred_dict['color'].cpu().numpy().squeeze()
        # tri+1: OBJ face indices are 1-based.
        utils.save_obj(out_obj_path, vs, tri+1, color)

        print('composed image is saved at %s' % args.res_folder)


if __name__ == '__main__':
    # ``args`` is intentionally module-level: get_face_bbox() reads the
    # global ``args`` for its padding ratio.
    args = ImageFittingOptions()
    args = args.parse()

    # args.device = 'cuda:%d' % args.gpu

    # Force CPU execution; the commented GPU line above is kept for reference.
    args.device = "cpu"

    fit(args)
