# Standard library
import multiprocessing as mp
import os
import time
import urllib.error
import urllib.request

# Third-party
import cv2
import face_alignment
import numpy as np
import orjson
import oss2
import torch
from tqdm import tqdm

# Local
import core.losses as losses
import core.utils as utils
from core import get_recon_model
from core.options import ImageFittingOptions
from face_merge import merge_multi_face
from utril.solexFacePoints import get_points68

def tran_points_index(potlist):
    """Reorder 68 facial landmarks from the detector's index convention to
    the index convention expected by the reconstruction model.

    Args:
        potlist: sequence of 68 landmark points (any element type).

    Returns:
        list: the same points re-ordered by the fixed permutation below.
    """
    # Fixed permutation: output position i takes input point tran_idx[i].
    tran_idx = [
        16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
        56, 57, 58, 59, 60, 65, 64, 63, 62, 61,
        17, 18, 19, 20, 21, 22, 23, 24, 25,
        47, 46, 45, 44, 49, 48, 50, 51, 52, 53, 54, 55,
        32, 31, 30, 29, 28, 27, 26, 37, 36, 35, 34, 33,
        66, 41, 42, 43, 67, 38, 39, 40,
    ]
    return [potlist[i] for i in tran_idx]
def load_coeffs_path(coeffs_path, device_):
    """Load previously saved 3DMM coefficients from an .npy file.

    Args:
        coeffs_path: path to the saved numpy array, or None.
        device_: torch device string (e.g. 'cpu', 'cuda:0').

    Returns:
        A (1, N) float32 tensor on the requested device with gradient
        tracking enabled, or None when no path was given.
    """
    if coeffs_path is None:
        return None
    # Restore the saved array and add a leading batch dimension.
    loaded = np.load(coeffs_path)
    tensor = torch.from_numpy(loaded).float().unsqueeze(0)
    tensor = tensor.to(torch.device(device_))
    # Enable gradients so the coefficients can be optimized further.
    tensor.requires_grad_(True)
    return tensor
def resizeImg(img):
    """Down-scale a raw input image to a working resolution for detection.

    Only a fixed set of known camera resolutions is supported; each maps to
    a hard-coded (x, y) scale factor.

    Args:
        img: HxWxC image array.

    Returns:
        tuple: (resized image, [x_scale, y_scale]); the scale factors are
        needed later to map detected coordinates back to full resolution.

    Raises:
        ValueError: if the image resolution is not one of the known sizes.
    """
    h, w, c = img.shape
    # Known (height, width) -> [x_scale, y_scale] mapping.
    scale_table = {
        (8000, 6000): [0.1, 0.1],
        (1920, 1080): [0.4, 0.4],
        (4800, 3600): [0.1, 0.1],
    }
    resize_param = scale_table.get((h, w))
    if resize_param is None:
        # Fix: the original fell through with resize_param=None and crashed
        # with a TypeError on subscript; fail loudly with a clear message.
        raise ValueError(
            f"unsupported image resolution {w}x{h} (WxH); "
            f"known sizes: 6000x8000, 1080x1920, 3600x4800")
    resize_img = cv2.resize(
        img,
        (int(w * resize_param[0]), int(h * resize_param[1])),
        interpolation=cv2.INTER_AREA)
    return resize_img, resize_param
def paddingImg(img_arr,img,resize_param,result):
    """Crop the detected face to a square region and rescale its landmarks.

    Operates on two images in parallel: ``img`` (the down-scaled image the
    detector ran on) and ``img_arr`` (the full-resolution original). The
    detector's face box is mapped back to full resolution via ``resize_param``.

    Args:
        img_arr: full-resolution image (H_arr x W_arr x 3).
        img: down-scaled detection image (H x W x 3).
        resize_param: [x_scale, y_scale] that produced ``img`` from ``img_arr``.
        result: detector output dict with "landmarks" (points on ``img``) and
            "box" ({'x1','y1','x2','y2'} face box on ``img``).

    Returns:
        tuple: (256x256 face crop of ``img``, square face crop of ``img_arr``,
        landmark list rescaled into the 256x256 crop, dict of padding/box
        parameters needed to map results back later).
    """
    points=np.array(result["landmarks"])
    box=result["box"]
    box_arr=box.copy()
    # Scale the detection box back up to full-resolution coordinates.
    box_arr['x1'] = int(box["x1"]/resize_param[0])
    box_arr['x2'] = int(box["x2"]/resize_param[0])
    box_arr['y1'] = int(box["y1"]/resize_param[1])
    box_arr['y2'] = int(box["y2"]/resize_param[1])
    h,w,c=img.shape
    h_arr,w_arr,c=img_arr.shape
    # Face-box heights on the small and the full-resolution image.
    cut_h=int(box['y2']-box['y1'])
    cut_h_arr = int(box_arr['y2'] - box_arr['y1'])
    param_json ={}
    if cut_h<w:
        # Face box shorter than the image width: take a w x w window
        # vertically centered on the box. padding_param = [x_off, y_off].
        padding_param=[0,int(box['y1'])-(w-cut_h)//2]
        padding_img=img[padding_param[1]:padding_param[1]+w,0:w]

        # Same w_arr x w_arr window on the full-resolution image.
        padding_param_arr = [0, int(box_arr['y1']) - (w_arr - cut_h_arr) // 2]
        padding_img_arr = img_arr[padding_param_arr[1]:padding_param_arr[1] + w_arr, 0:w_arr]
        param_json["padding_img_arr"]=padding_param_arr
    else:
        # Face box taller than the image is wide: paste the box rows into a
        # black cut_h x cut_h square, horizontally centered. The (negative)
        # first element records the horizontal shift applied to x coords.
        padding_param = [(cut_h-w)//2*-1, int(box['y1'])]
        padding_img = np.zeros((cut_h, cut_h,3), dtype=np.uint8)
        padding_img[0:cut_h,abs(padding_param[0]):abs(padding_param[0])+w]=img[int(box['y1']):int(box["y2"]),0:w]

        # Same construction at full resolution.
        padding_param_arr = [(cut_h_arr - w_arr) // 2 * -1, int(box_arr['y1'])]
        padding_img_arr = np.zeros((cut_h_arr, cut_h_arr,3), dtype=np.uint8)
        padding_img_arr[0:cut_h_arr, abs(padding_param_arr[0]):abs(padding_param_arr[0]) + w_arr] = img_arr[int(box_arr['y1']):int(box_arr["y2"]), 0:w_arr]
        param_json["padding_img_arr"]=padding_param_arr
        param_json["box_arr"] = box_arr

    h_,w_,c=padding_img.shape
    resize_img = cv2.resize(padding_img, (256, 256), interpolation=cv2.INTER_AREA)
    # Shift each (x, y) landmark by the crop offset [x_off, y_off], then
    # scale from the square crop (side h_) down to the 256x256 output.
    scaled_points = (points-padding_param) / [h_/256, h_/256]
    scaled_points = scaled_points.astype(int)
    return resize_img,padding_img_arr,scaled_points.tolist(),param_json
def preDetImg(img_arr, args, show):
    """Detect 68 landmarks on the raw image and build the fitting inputs.

    Args:
        img_arr: full-resolution image array.
        args: options object providing the torch device.
        show: if True, display the crop with landmarks overlaid.

    Returns:
        tuple: (landmark tensor, preview image or None, image tensor,
        full-resolution square face crop, padding parameter dict).
    """
    resized, scale = resizeImg(img_arr)
    det_result = get_points68(resized)
    padded, padded_full, pts, pad_param = paddingImg(img_arr, resized, scale, det_result)
    # Re-index the landmarks for the reconstruction model, batched.
    lm_batch = np.array([tran_points_index(pts)])
    preview = None
    if show:
        preview = padded.copy()
        for px, py in pts:
            cv2.circle(preview, (int(px), int(py)), 3, (255, 0, 0), -1)
        cv2.imshow("resized_face_img_show", preview)
        cv2.waitKey(3)
    lms = torch.tensor(lm_batch, dtype=torch.float32, device=args.device)
    img_tensor = torch.tensor(padded[None, ...], dtype=torch.float32, device=args.device)
    print('landmarks detected.')
    return lms, preview, img_tensor, padded_full, pad_param

def fit(args, imgLink, coeffs_path=None, show=False):
    """Download an image, fit a 3DMM to the face, and save the results.

    Two optimization stages run in sequence: a rigid stage (rotation and
    translation only, pure landmark loss) and a non-rigid stage (identity,
    expression, texture and lighting coefficients plus pose, with L2
    regularizers and a skin-reflectance loss). Saves coefficients (.npy),
    UV texture (.jpg), material (.mtl) and mesh (.obj) into args.res_folder.

    Args:
        args: parsed ImageFittingOptions (device, iteration counts, loss
            weights, result folder, ...).
        imgLink: URL of the image to fit.
        coeffs_path: optional path of previously saved coefficients used to
            warm-start the first rigid iteration.
        show: if True, display landmark overlays while optimizing.

    Returns:
        dict: {image basename: padding/box parameters} so downstream code
        can map the fitted mesh back onto the original image.
    """
    print('loading image')
    print(imgLink)
    # Fetch the image bytes and decode; [:, :, ::-1] converts BGR -> RGB.
    response = urllib.request.urlopen(imgLink)
    image_data = np.asarray(bytearray(response.read()), dtype=np.uint8)
    img_arr = cv2.imdecode(image_data, cv2.IMREAD_COLOR)[:, :, ::-1]

    lms, showImg, img_tensor, padding_img_arr, padding_param = preDetImg(img_arr, args, show)
    # Fix: use splitext instead of the fragile [:-4] slice so extensions of
    # any length are stripped correctly; reused for all output file names.
    basename = os.path.splitext(os.path.basename(imgLink))[0]
    box_conf = {basename: padding_param}
    print('loading models')
    recon_model = get_recon_model(model=args.recon_model,
                                  device=args.device,
                                  batch_size=1,
                                  img_size=args.tar_size)
    coeffs = load_coeffs_path(coeffs_path, args.device)
    lm_weights = utils.get_lm_weights(args.device)

    # Stage 1: rigid fitting - optimize only rotation and translation.
    rigid_optimizer = torch.optim.Adam(
        [recon_model.get_rot_tensor(), recon_model.get_trans_tensor()],
        lr=args.rf_lr)
    preloss = None
    for i in tqdm(range(args.first_rf_iters)):
        rigid_optimizer.zero_grad()
        # Fix: compare tensors to None with `is not None`, never `!=`.
        if i == 0 and coeffs is not None:
            print("初始 coeffs 均值:", coeffs.mean().item())
            pred_dict = recon_model(coeffs, render=False)
        else:
            pred_dict = recon_model(recon_model.get_packed_tensors(), render=False)
        if show:
            copy_img = showImg.copy()
            for point in pred_dict["lms_proj"][0]:
                x1, y1 = point
                cv2.circle(copy_img, (int(x1), int(y1)), 3, (0, 255, 0), -1)
            cv2.imshow("resized_face_img_show", copy_img)
            cv2.waitKey(1)
        lm_loss_val = losses.lm_loss(pred_dict['lms_proj'], lms, lm_weights, img_size=args.tar_size)
        total_loss = args.lm_loss_w * lm_loss_val
        total_loss.backward()
        rigid_optimizer.step()
        # Early-stop when the loss change is negligible. Fix: track the loss
        # as a Python float (.item()) so old autograd graphs are not kept
        # alive through `preloss` across iterations.
        cur_loss = total_loss.item()
        if preloss is not None and abs(preloss - cur_loss) < 1.0e-06:
            break
        preloss = cur_loss

    # Stage 2: non-rigid fitting - optimize identity/expression/texture/
    # lighting coefficients together with pose.
    print('start non-rigid fitting')
    nonrigid_optimizer = torch.optim.Adam(
        [recon_model.get_id_tensor(), recon_model.get_exp_tensor(),
         recon_model.get_gamma_tensor(), recon_model.get_tex_tensor(),
         recon_model.get_rot_tensor(), recon_model.get_trans_tensor()], lr=args.nrf_lr)
    for i in tqdm(range(args.first_nrf_iters)):
        nonrigid_optimizer.zero_grad()
        pred_dict = recon_model(recon_model.get_packed_tensors(), render=True)
        lms_proj = pred_dict['lms_proj']
        face_texture = pred_dict['face_texture']
        if show:
            copy_img = showImg.copy()
            for point in pred_dict["lms_proj"][0]:
                x1, y1 = point
                # +256 offsets the overlay horizontally for visualization.
                cv2.circle(copy_img, (int(x1) + 256, int(y1)), 3, (0, 255, 0), -1)
            cv2.imshow("resized_face_img_show", copy_img)
            cv2.waitKey(1)
        lm_loss_val = losses.lm_loss(lms_proj, lms, lm_weights,
                                     img_size=args.tar_size)
        id_reg_loss = losses.get_l2(recon_model.get_id_tensor())
        exp_reg_loss = losses.get_l2(recon_model.get_exp_tensor())
        tex_reg_loss = losses.get_l2(recon_model.get_tex_tensor())
        tex_loss_val = losses.reflectance_loss(face_texture, recon_model.get_skinmask())
        # Weighted sum of landmark loss, coefficient regularizers and the
        # reflectance loss (photometric loss currently disabled).
        loss = lm_loss_val * args.lm_loss_w + \
               id_reg_loss * args.id_reg_w + \
               exp_reg_loss * args.exp_reg_w + \
               tex_reg_loss * args.tex_reg_w + \
               tex_loss_val * args.tex_w
        loss.backward()
        nonrigid_optimizer.step()
    loss_str = ''
    loss_str += 'lm_loss: %f\t' % lm_loss_val.detach().cpu().numpy()
    print('done non rigid fitting.', loss_str)

    # Export: coefficients, UV texture image, material and mesh.
    with torch.no_grad():
        coeffs = recon_model.get_packed_tensors()
        pred_dict = recon_model(coeffs, render=True)
        utils.mymkdirs(args.res_folder)
        # Save the fitted coefficients so later runs can warm-start.
        out_coeff_path = os.path.join(
            args.res_folder, basename + '_coeffs.npy')
        np.save(out_coeff_path, coeffs.detach().cpu().numpy().squeeze())
        out_obj_path = os.path.join(
            args.res_folder, basename + '_mesh.obj')
        vs = pred_dict['vs'].cpu().numpy().squeeze()
        # UV coordinates are produced in pixel units of the 256x256 crop;
        # normalize them to [0, 1] for the .obj file.
        uv = (pred_dict['vs_uv'].cpu().numpy().squeeze()) / 256
        tri = pred_dict['tri'].cpu().numpy().squeeze()
        uv_path = basename + '_uv.jpg'
        # Convert RGB back to BGR for OpenCV before writing the texture.
        cv2.imwrite(os.path.join(args.res_folder, uv_path), padding_img_arr[:, :, ::-1])
        mtl_path = basename + '_uv.mtl'
        utils.save_mtl(os.path.join(args.res_folder, mtl_path), uv_path)
        # OBJ face indices are 1-based, hence tri + 1.
        utils.save_obj_uv(out_obj_path, vs, tri + 1, uv, mtl_path)
        print('composed image is saved at %s' % args.res_folder)
    return box_conf
def process_file(img_path):
    """Worker entry point: parse options, bind the GPU, fit one image.

    Args:
        img_path: URL (or path) of the image to fit.

    Returns:
        The box/padding configuration dict produced by `fit`.
    """
    options = ImageFittingOptions().parse()
    options.device = 'cuda:%d' % options.gpu
    # Pin this worker process to its configured GPU before any CUDA work.
    torch.cuda.set_device(options.gpu)
    torch.cuda.init()
    print(f"start fitting image: {os.path.basename(img_path)}")
    return fit(options, img_path, None)
def is_oss_image_exist(url):
    """Return True if `url` answers with HTTP 200, False otherwise.

    A 404, any other HTTP error, or a connection/URL failure all yield
    False; non-404 failures are logged first.

    Args:
        url: the URL to probe (5 second timeout).

    Returns:
        bool: whether the resource responded with status 200.
    """
    # Fix: the original referenced bare `urlopen`/`HTTPError`/`URLError`
    # names that were never imported, so every call raised NameError.
    try:
        with urllib.request.urlopen(url, timeout=5) as response:
            return response.status == 200
    except urllib.error.HTTPError as e:
        if e.code == 404:
            return False
        print(f"HTTP Error: {e}")
        return False
    except urllib.error.URLError as e:
        print(f"URL Error: {e}")
        return False
    
def create_(img_link_list):
    """Fit every image URL in parallel using a 3-worker process pool.

    Args:
        img_link_list: list of image URLs to process.

    Returns:
        list: one `fit` result dict per input URL, in order.
    """
    started_at = time.time()
    try:
        # 'spawn' is required for CUDA in child processes; a RuntimeError
        # means the start method was already configured elsewhere.
        mp.set_start_method('spawn')
    except RuntimeError:
        print("try set spawn")
    # Three concurrent workers, each fitting one image at a time.
    with mp.Pool(processes=3) as pool:
        outputs = pool.map(process_file, img_link_list)
    print("all speed time:{}".format(time.time() - started_at))
    return outputs


def updataOSS(localPath, oss_file_path):
    """Upload a local file to Aliyun OSS and return its status and URL.

    Args:
        localPath: path of the local file to upload.
        oss_file_path: destination object key inside the bucket.

    Returns:
        tuple: (HTTP status code, or None if the upload raised; public URL
        of the object in the 'solex-ai' bucket).
    """
    # SECURITY: credentials were hard-coded in source. Prefer environment
    # variables; the legacy literals remain only as a fallback for
    # compatibility and should be rotated and removed from version control.
    access_key_id = os.environ.get('OSS_ACCESS_KEY_ID', 'LTAI5tLnE2FPf88GnbPRvZWJ')
    access_key_secret = os.environ.get('OSS_ACCESS_KEY_SECRET', 'wBsDv7lxZU0oqERNHSWo3QcvyZ1cG3')
    auth = oss2.Auth(access_key_id, access_key_secret)
    endpoint = 'oss-cn-shanghai.aliyuncs.com'
    bucket_name = 'solex-ai'
    bucket = oss2.Bucket(auth, endpoint, bucket_name)
    # Fix: `result` was unbound when put_object_from_file raised, so the
    # final `return result.status` crashed with NameError.
    status = None
    try:
        result = bucket.put_object_from_file(oss_file_path, localPath)
        status = result.status
        if status == 200:
            print(f"文件 {localPath} 成功上传到 OSS: {oss_file_path}")
        else:
            print(f"上传失败，状态码: {status}")
    except Exception as e:
        print(f"上传失败: {e}")
    return status, "https://solex-ai.oss-cn-shanghai.aliyuncs.com/" + oss_file_path

if __name__ == '__main__':
    # Fit the three views (front / left / right) of one capture session.
    img_link_list = []
    img_link_list.append("https://solex-ai.oss-cn-shanghai.aliyuncs.com/DBFace4800/917/1331668568122724352-1737536129269/whiteFace_face.jpg")
    img_link_list.append("https://solex-ai.oss-cn-shanghai.aliyuncs.com/DBFace4800/917/1331668568122724352-1737536129269/whiteLeft_left.jpg")
    img_link_list.append("https://solex-ai.oss-cn-shanghai.aliyuncs.com/DBFace4800/917/1331668568122724352-1737536129269/whiteRight_right.jpg")
    data = create_(img_link_list)
    save_dir = "/root/hcode/3DMM-Fitting-Pytorch-master/results/api_2"
    # Fix: ensure the output directory exists before opening the file.
    os.makedirs(save_dir, exist_ok=True)
    # orjson.dumps returns bytes, so the file must be opened in binary mode.
    with open(os.path.join(save_dir, 'output.json'), 'wb') as f:
        f.write(orjson.dumps(data, option=orjson.OPT_INDENT_2))
    # NOTE: the follow-up merge step (face_merge.merge_multi_face over the
    # three fitted meshes/textures) is currently disabled; re-enable it by
    # calling merge_multi_face with the per-view _mesh.obj/_uv.jpg outputs.
