from tqdm import tqdm
import cv2
from core import get_recon_model
import torch
import numpy as np
import core.losses as losses
from utril.solexFacePoints import tran_points_index,get_points68
class FitFace():
    """Fit a 3D face reconstruction model to a single face image by
    optimizing the model's rigid pose (rotation + translation) so that its
    projected landmarks match the 68 detected 2D landmarks.
    """

    def __init__(self, args):
        """
        Args:
            args: namespace with at least the attributes used here:
                recon_model, tar_size, rf_lr, device, first_rf_iters,
                lm_loss_w.
        """
        self.args = args
        self.recon_model = get_recon_model(model=self.args.recon_model,
                                           batch_size=1,
                                           img_size=self.args.tar_size)
        # Optimize only the rigid parameters (rotation + translation).
        # NOTE(review): the original constructed this optimizer twice with
        # identical arguments; the duplicate has been removed.
        self.rigid_optimizer = torch.optim.Adam(
            [self.recon_model.get_rot_tensor(),
             self.recon_model.get_trans_tensor()],
            lr=self.args.rf_lr)

    def fitting(self, img, show=True):
        """Run rigid landmark fitting on a single face image.

        Args:
            img: face image as an OpenCV BGR ndarray (H, W, 3) — presumably
                already resized to args.tar_size; TODO confirm with caller.
            show: when True (default, matching original behavior), draw the
                projected landmarks on a copy of the image each iteration.
        """
        result = get_points68(img)
        lms_np = np.array([tran_points_index(result["landmarks"])])
        lms = torch.tensor(lms_np, dtype=torch.float32, device=self.args.device)
        # Uniform per-landmark weights. NOTE(review): the original referenced
        # an undefined name `lm_weights` (NameError at runtime); fitting
        # pipelines of this kind often up-weight eyes/mouth points — confirm
        # the expected shape/weighting against losses.lm_loss.
        lm_weights = torch.ones(lms.shape[1], device=self.args.device)

        pbar = tqdm(range(self.args.first_rf_iters))
        for _ in pbar:
            self.rigid_optimizer.zero_grad()
            pred_dict = self.recon_model(self.recon_model.get_packed_tensors(),
                                         render=False)
            if show:
                # NOTE(review): the original drew on an undefined variable
                # `resized_face_img_show` (NameError); draw on a copy of the
                # input image instead.
                copy_img = img.copy()
                proj = pred_dict["lms_proj"][0].detach().cpu().numpy()
                for x1, y1 in proj:
                    cv2.circle(copy_img, (int(x1), int(y1)), 3, (0, 255, 0), -1)
                cv2.imshow("resized_face_img_show", copy_img)
                cv2.waitKey(1)
            lm_loss_val = losses.lm_loss(pred_dict['lms_proj'], lms, lm_weights,
                                         img_size=self.args.tar_size)
            total_loss = self.args.lm_loss_w * lm_loss_val
            # Report the loss on the tqdm bar instead of print(), which would
            # break the progress-bar rendering.
            pbar.set_description("total_loss:{}".format(total_loss.item()))
            total_loss.backward()
            self.rigid_optimizer.step()
if __name__ == "__main__":
    # NOTE(review): the original called FitFace() without the required `args`
    # argument (TypeError) and passed an int (100) where fitting() expects an
    # image ndarray. Build the args from the command line instead.
    import argparse

    parser = argparse.ArgumentParser(
        description="Fit a 3D face model's rigid pose to a single face image.")
    parser.add_argument("image", help="path to the input face image")
    # Defaults below are placeholders — TODO confirm against the project's
    # standard config.
    parser.add_argument("--recon_model", default="bfm09",
                        help="reconstruction model name passed to get_recon_model")
    parser.add_argument("--tar_size", type=int, default=256,
                        help="target image size used by the model and loss")
    parser.add_argument("--rf_lr", type=float, default=1e-2,
                        help="learning rate for rigid fitting")
    parser.add_argument("--lm_loss_w", type=float, default=100.0,
                        help="weight of the landmark loss")
    parser.add_argument("--first_rf_iters", type=int, default=1000,
                        help="number of rigid-fitting iterations")
    parser.add_argument("--device",
                        default="cuda:0" if torch.cuda.is_available() else "cpu",
                        help="torch device for tensors")
    cli_args = parser.parse_args()

    image = cv2.imread(cli_args.image)
    if image is None:
        raise FileNotFoundError(
            "could not read image: {}".format(cli_args.image))

    ff = FitFace(cli_args)
    ff.fitting(image)