import time

import cv2
import math
import numpy as np
import onnx
import onnxruntime
import torch
from torchvision import transforms
import sys

sys.path.append("../..")
sys.path.append("../../..")
from external.tools import 向量均值, 读取图片,read_image
import os

from external.insight.face_analysis import FaceAnalysis
from external.tools import singleton
from config import models_directory
from utils.video_utils import extract_frames
from utils import many_face_fenxi
import tempfile



import uuid

def generate_uuid_string():
    """Return a freshly generated random UUID (version 4) as a string."""
    return str(uuid.uuid4())

def load_faceapp_model():
    """Create and prepare the insightface 'buffalo_l' analysis app.

    Prepared on GPU context 0 with a 640x640 detection size and a 0.5
    detection threshold.
    """
    face_app = FaceAnalysis(name='buffalo_l')
    face_app.prepare(ctx_id=0, det_thresh=0.5, det_size=(640, 640))
    return face_app


def load_GFPGAN_model():
    """Load the GFPGAN v1.4 face-restoration ONNX model on CUDA."""
    model_path = f"{models_directory}/GFPGANv1.4.onnx"
    return onnxruntime.InferenceSession(model_path,
                                        providers=["CUDAExecutionProvider"])


def load_GPEN_512_model():
    """Load the GPEN-BFR-512 face-restoration ONNX model.

    Fix: resolve the model file under the configured ``models_directory``
    (consistent with every other loader in this module) instead of a
    CWD-relative ``./models`` path, which broke when the process was started
    from a different working directory. The CPU fallback provider is kept.
    """
    model_path = f"{models_directory}/GPEN-BFR-512.onnx"
    return onnxruntime.InferenceSession(
        model_path, providers=["CUDAExecutionProvider", "CPUExecutionProvider"])


def load_swapper_model():
    """Load inswapper_128 and return ``(session, emap)``.

    ``emap`` is the last initializer tensor of the ONNX graph; it is used
    downstream to project source-face embeddings into the swapper's latent
    space.
    """
    model_path = f"{models_directory}/inswapper_128.onnx"
    proto = onnx.load(model_path)
    emap = onnx.numpy_helper.to_array(proto.graph.initializer[-1])
    session = onnxruntime.InferenceSession(model_path,
                                           providers=["CUDAExecutionProvider"])
    return session, emap


def load_codeformer_model():
    """Load the CodeFormer face-restoration ONNX model on CUDA."""
    model_path = f"{models_directory}/codeformer.onnx"
    return onnxruntime.InferenceSession(model_path,
                                        providers=["CUDAExecutionProvider"])


from loguru import logger


# @singleton
class FaceModels:
    """End-to-end face-swap pipeline.

    Wraps insightface detection/embedding, the inswapper_128 swapper and an
    optional face enhancer (CodeFormer / GFPGAN / GPEN), all executed through
    ONNX Runtime with CUDA IO bindings when ``self.io_binding`` is True.
    """

    def __init__(self):
        self.input_names = []  # names of the inswapper.onnx inputs
        self.output_names = []  # names of the inswapper.onnx outputs

        self.faceapp = load_faceapp_model()
        logger.debug("加载脸部分析模型完毕..")
        swapper, emap = load_swapper_model()
        self.set_swapper_model(swapper, emap)
        logger.debug("加载换脸模型完毕..")
        self.parameters = {
            "Enhancer": "CF",  # CF or GFPGAN or GPEN
            "GFPGANAmount": 100, # enhancer blend strength in percent; can also be passed per call (~60% is usually enough)
            "GFPGAN": True,
            "threshhold": 0.85,
            "BlurAmount": 5,
            # mask parameters used when compositing the swapped face back in
            "MaskTop": 10,
            "MaskSide": 10,
            "MaskBlur": 10,

        }
        # NOTE(review): this is an if / if-else chain, not if/elif/else — when
        # Enhancer == "CF" the final else still loads the GFPGAN model as well.
        # Probably intended to be mutually exclusive; confirm before changing.
        if self.parameters["Enhancer"] == "CF":
            self.codeformer_model = load_codeformer_model()
            logger.debug("加载[CF]模型完毕..")
        if self.parameters["Enhancer"] == "GPEN":
            self.GPEN_512_model = load_GPEN_512_model()
            logger.debug("加载[GPEN]模型完毕..")
        else:
            self.GFPGAN_model = load_GFPGAN_model()
            logger.debug("加载[GF]模型完毕..")
        # When True, ONNX Runtime IO binding keeps model outputs on CUDA.
        self.io_binding = True
        # Canonical ArcFace 5-point landmark template (left eye, right eye,
        # nose, left mouth corner, right mouth corner) on a 112x112 reference.
        self.arcface_dst = np.array(
            [[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366], [41.5493, 92.3655], [70.7299, 92.2041]],
            dtype=np.float32)
        # Pairwise distances between selected template landmarks; swap_core
        # computes the same pairs on detected keypoints and uses the max-ratio
        # pair to estimate the face scale in the frame.
        self.arcface_dst_max = []
        self.arcface_dst_max.append(math.sqrt(
            (self.arcface_dst[0][0] - self.arcface_dst[1][0]) * (self.arcface_dst[0][0] - self.arcface_dst[1][0]) + (
                    self.arcface_dst[0][1] - self.arcface_dst[1][1]) * (
                    self.arcface_dst[0][1] - self.arcface_dst[1][1])))
        self.arcface_dst_max.append(math.sqrt(
            (self.arcface_dst[1][0] - self.arcface_dst[4][0]) * (self.arcface_dst[1][0] - self.arcface_dst[4][0]) + (
                    self.arcface_dst[1][1] - self.arcface_dst[4][1]) * (
                    self.arcface_dst[1][1] - self.arcface_dst[4][1])))
        self.arcface_dst_max.append(math.sqrt(
            (self.arcface_dst[3][0] - self.arcface_dst[4][0]) * (self.arcface_dst[3][0] - self.arcface_dst[4][0]) + (
                    self.arcface_dst[3][1] - self.arcface_dst[4][1]) * (
                    self.arcface_dst[3][1] - self.arcface_dst[4][1])))
        self.arcface_dst_max.append(math.sqrt(
            (self.arcface_dst[0][0] - self.arcface_dst[3][0]) * (self.arcface_dst[0][0] - self.arcface_dst[3][0]) + (
                    self.arcface_dst[0][1] - self.arcface_dst[3][1]) * (
                    self.arcface_dst[0][1] - self.arcface_dst[3][1])))
        self.arcface_dst_max.append(math.sqrt(
            (self.arcface_dst[0][0] - self.arcface_dst[4][0]) * (self.arcface_dst[0][0] - self.arcface_dst[4][0]) + (
                    self.arcface_dst[0][1] - self.arcface_dst[4][1]) * (
                    self.arcface_dst[0][1] - self.arcface_dst[4][1])))
        self.arcface_dst_max.append(math.sqrt(
            (self.arcface_dst[1][0] - self.arcface_dst[3][0]) * (self.arcface_dst[1][0] - self.arcface_dst[3][0]) + (
                    self.arcface_dst[1][1] - self.arcface_dst[3][1]) * (
                    self.arcface_dst[1][1] - self.arcface_dst[3][1])))

    def 从图片获取人脸数据(self, target_image, max_num=1):
        """Detect faces in an image and return those above the 0.6 confidence cut.

        Returns a list of insightface Face objects (possibly empty).
        """
        ret = self.faceapp.get(target_image, max_num=max_num)  # max_num caps how many faces are extracted
        ret_list = []
        for face in ret:
            # det_score is the detector's confidence for this face
            if face["det_score"] > 0.6:
                ret_list.append(face)
            else:
                logger.debug("人脸的置信度太低,不进行处理..")

        return ret_list

    def set_swapper_model(self, swapper, emap):
        """Store the swapper session/emap and cache its input/output names and size."""
        self.swapper_model = swapper
        self.emap = emap

        # Get in/out size and create some data
        inputs = self.swapper_model.get_inputs()
        for inp in inputs:
            self.input_names.append(inp.name)
        input_cfg = inputs[0]
        input_shape = input_cfg.shape
        # ONNX shape is NCHW; take (W, H) of the image input.
        self.input_size = tuple(input_shape[2:4][::-1])

        outputs = self.swapper_model.get_outputs()
        for out in outputs:
            self.output_names.append(out.name)

    def 读取源脸(self, temp_file):
        """Read an image file and return [82x82 RGB crop, embedding] of its first face.

        Returns None (implicitly) when no face is detected.
        """
        temp_file = cv2.imread(temp_file)
        ret = self.faceapp.get(temp_file, max_num=10)  # detect faces (up to 10)
        if ret:
            # face bounding box
            bbox = ret[0].bbox  # use the first detected face
            y_diff = bbox[3] - bbox[1]
            x_diff = bbox[2] - bbox[0]

            crop = temp_file[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])]  # y,x
            # Pad the shorter side so the crop becomes square before resizing.
            if y_diff > x_diff:
                padding = int((y_diff - x_diff) / 2)
                crop = cv2.copyMakeBorder(crop, 0, 0, padding, padding, cv2.BORDER_CONSTANT)
            else:
                padding = int((x_diff - y_diff) / 2)
                crop = cv2.copyMakeBorder(crop, padding, padding, 0, 0, cv2.BORDER_CONSTANT)

            crop = cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)  # BGR -> RGB (original note questioned whether this is needed)
            crop = cv2.resize(crop, (82, 82))
            temp = [crop, ret[0].embedding]  # only the embedding is really needed downstream
            return temp

    # Read several source faces in sequence.
    def 读取多张源脸(self, temp_files):
        faces = []
        for temp_file in temp_files:
            temp = self.读取源脸(temp_file)
            faces.append(temp)  # append this face's info
        # One entry per file: [0] crop image, [1] embedding.
        # NOTE(review): entries are None when 读取源脸 found no face.
        return faces

    def 读取多张人脸_向量(self, temp_files):
        """Return only the embedding vectors for a list of source face files.

        NOTE(review): if any file had no detectable face, the corresponding
        entry from 读取多张源脸 is None and source_face[1] raises TypeError.
        """
        source_faces_Embedding = []
        source_faces = self.读取多张源脸(temp_files)
        for source_face in source_faces:
            source_faces_Embedding.append(source_face[1])
        return source_faces_Embedding

    # Face-restoration (CodeFormer) pass.
    def codeformer(self, swapped_face_upscaled,GFPGANAmount):
        """Run CodeFormer on a 512x512 BGR float face image.

        The restored result is alpha-blended with the input using
        GFPGANAmount percent; returns a BGR float image in [0, 255].
        """
        img = swapped_face_upscaled

        # BGR -> RGB, scale to [0,1], CHW, then normalize to [-1,1].
        img = img.astype(np.float32)[:, :, ::-1] / 255.0
        img = img.transpose((2, 0, 1))
        img = (img - 0.5) / 0.5
        img = np.expand_dims(img, axis=0).astype(np.float32)
        # CodeFormer fidelity weight w (1.0 = max fidelity to input).
        w = np.array([1.0], dtype=np.double)

        if self.io_binding:
            io_binding = self.codeformer_model.io_binding()
            io_binding.bind_cpu_input('x', img)
            io_binding.bind_cpu_input('w', w)
            io_binding.bind_output('y', "cuda")

            self.codeformer_model.run_with_iobinding(io_binding)
            output = io_binding.copy_outputs_to_cpu()[0][0]
        else:
            output = self.codeformer_model.run(None, {'x': img, 'w': w})[0][0]

        # Back to HWC [0,255] BGR.
        img = (output.transpose(1, 2, 0).clip(-1, 1) + 1) * 0.5
        img = (img * 255)[:, :, ::-1]
        img = img.clip(0, 255)
        temp2 = float(GFPGANAmount) / 100.0
        img = cv2.addWeighted(img, temp2, swapped_face_upscaled, 1.0 - temp2, 0)

        return img

    def apply_GFPGAN(self, swapped_face_upscaled):
        """Run GFPGAN on a 512x512 BGR face and blend by parameters['GFPGANAmount']."""

        temp = swapped_face_upscaled

        # preprocess
        # temp = cv2.resize(temp, (512, 512))
        temp = temp / 255.0
        # temp = temp.astype('float32')
        temp = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB)  # BGR -> RGB (original author questioned this conversion)
        # Normalize each channel to [-1, 1].
        temp[:, :, 0] = (temp[:, :, 0] - 0.5) / 0.5
        temp[:, :, 1] = (temp[:, :, 1] - 0.5) / 0.5
        temp[:, :, 2] = (temp[:, :, 2] - 0.5) / 0.5
        temp = np.float32(temp[np.newaxis, :, :, :])
        temp = temp.transpose(0, 3, 1, 2)

        ort_inputs = {"input": temp}
        if self.io_binding:
            io_binding = self.GFPGAN_model.io_binding()
            io_binding.bind_cpu_input("input", temp)
            # "1288" is the GFPGANv1.4.onnx output tensor name.
            io_binding.bind_output("1288", "cuda")

            self.GFPGAN_model.run_with_iobinding(io_binding)
            ort_outs = io_binding.copy_outputs_to_cpu()
        else:

            ort_outs = self.GFPGAN_model.run(None, ort_inputs)

        output = ort_outs[0][0]

        # postprocess
        output = output.clip(-1, 1)
        output = (output + 1) / 2
        output = output.transpose(1, 2, 0)
        output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
        output = (output * 255.0).round()

        temp2 = float(self.parameters["GFPGANAmount"]) / 100.0
        swapped_face_upscaled = cv2.addWeighted(output, temp2, swapped_face_upscaled, 1.0 - temp2, 0)

        return swapped_face_upscaled

    def swap_core(self, img, kps, s_e, bbox,GFPGANAmount=100):
        """Swap one face in ``img``.

        Args:
            img: full frame (BGR, HxWx3).
            kps: 5-point landmarks of the target face in frame coordinates.
            s_e: source-face embedding (un-normalized).
            bbox: target face bbox; mutated in place and reused as paste region.
            GFPGANAmount: enhancer blend strength in percent.

        Returns the frame with the swapped (and optionally enhanced) face
        composited back in, as uint8 BGR.
        """
        from skimage import transform as trans
        from math import floor, ceil
        from numpy.linalg import norm as l2norm
        start_swap_time = time.time()

        # Build the 512x512 alignment transform (ArcFace template scaled x4).
        ratio = 4.0
        diff_x = 8.0 * ratio
        dst = self.arcface_dst * ratio
        dst[:, 0] += diff_x
        tform = trans.SimilarityTransform()
        tform.estimate(kps, dst)
        M512 = tform.params[0:2, :]
        IM512 = cv2.invertAffineTransform(M512)
        # option 2
        # Same landmark-pair distances as self.arcface_dst_max, measured on
        # the detected keypoints.
        kps_dist = []
        kps_dist.append(math.sqrt(
            (kps[0][0] - kps[1][0]) * (kps[0][0] - kps[1][0]) + (kps[0][1] - kps[1][1]) * (kps[0][1] - kps[1][1])))
        kps_dist.append(math.sqrt(
            (kps[1][0] - kps[4][0]) * (kps[1][0] - kps[4][0]) + (kps[1][1] - kps[4][1]) * (kps[1][1] - kps[4][1])))
        kps_dist.append(math.sqrt(
            (kps[3][0] - kps[4][0]) * (kps[3][0] - kps[4][0]) + (kps[3][1] - kps[4][1]) * (kps[3][1] - kps[4][1])))
        kps_dist.append(math.sqrt(
            (kps[0][0] - kps[3][0]) * (kps[0][0] - kps[3][0]) + (kps[0][1] - kps[3][1]) * (kps[0][1] - kps[3][1])))
        kps_dist.append(math.sqrt(
            (kps[0][0] - kps[4][0]) * (kps[0][0] - kps[4][0]) + (kps[0][1] - kps[4][1]) * (kps[0][1] - kps[4][1])))
        kps_dist.append(math.sqrt(
            (kps[1][0] - kps[3][0]) * (kps[1][0] - kps[3][0]) + (kps[1][1] - kps[3][1]) * (kps[1][1] - kps[3][1])))

        # max distance index between all facial features in frame size
        kps_dist_max_index = kps_dist.index(max(kps_dist))
        kps_dist_max = kps_dist[kps_dist_max_index]

        # distance between same features from arcface reference
        arcface_distance_max = self.arcface_dst_max[kps_dist_max_index]
        kps_ratio = kps_dist_max / arcface_distance_max
        # option 2
        # Aligned face pyramid: 512 -> 256 -> 128 (inswapper input is 128x128).
        original_face_512 = cv2.warpAffine(img, M512, (512, 512), borderValue=0.0)
        original_face_256 = cv2.resize(original_face_512, (256, 256))
        original_face = cv2.resize(original_face_256, (128, 128))

        # (debug) dump of the aligned face — verified correct up to here
        # img_bgr = cv2.cvtColor(original_face, cv2.COLOR_RGB2BGR)
        # cv2.imwrite('../output/original_face.jpg', img_bgr)

        blob = cv2.dnn.blobFromImage(original_face, 1.0 / 255.0, self.input_size, (0.0, 0.0, 0.0), swapRB=True)

        # Select source embedding
        n_e = s_e / l2norm(s_e)
        latent = n_e.reshape((1, -1))
        # Project through the swapper's emap, then re-normalize.
        latent = np.dot(latent, self.emap)
        latent /= np.linalg.norm(latent)
        logger.debug("开始执行换脸步骤")
        if self.io_binding:
            # Run the swapper model via CUDA IO binding.
            io_binding = self.swapper_model.io_binding()
            io_binding.bind_cpu_input(self.input_names[0], blob)
            io_binding.bind_cpu_input(self.input_names[1], latent)
            io_binding.bind_output(self.output_names[0], "cuda")

            self.swapper_model.run_with_iobinding(io_binding)
            ort_outs = io_binding.copy_outputs_to_cpu()
            pred = ort_outs[0]
        else:
            pred = self.swapper_model.run(self.output_names, {self.input_names[0]: blob, self.input_names[1]: latent})[
                0]

        # (debug) raw prediction dump — looked correct
        # temp_img = pred.transpose((0, 2, 3, 1))[0]
        # temp_bgr = np.clip(255 * temp_img, 0, 255).astype(np.uint8)[:, :, ::-1]
        # cv2.imwrite('../output/pred_output.jpg', temp_bgr)

        img_fake = pred.transpose((0, 2, 3, 1))[0]
        # swapped_face = np.clip(255 * img_fake, 0, 255).astype(np.float32)[:, :, ::-1]
        # swapped_face = np.clip(255 * img_fake, 0, 255).astype(np.float32)
        swapped_face = np.clip(255 * img_fake, 0, 255).astype(np.float32)[:, :, ::-1]  # flip channel order (RGB -> BGR)

        # (debug) dump of the swapped face
        # cv2.imwrite('../output/filename_swapped_face.jpg', swapped_face)

        swapped_face_upscaled = cv2.resize(swapped_face, (512, 512))  # upscale the 128x128 swap result to 512x512

        # Feathered rectangular mask that limits the paste area.
        border_mask = np.zeros((128, 128), dtype=np.float32)
        # MaskSide,MaskTop,MaskBlur
        border_mask = cv2.rectangle(border_mask, (int(self.parameters["MaskSide"]), int(self.parameters["MaskTop"])),
                                    (128 - int(self.parameters["MaskSide"]), 128 - 5), (255, 255, 255), -1) / 255.0
        border_mask = cv2.GaussianBlur(border_mask,
                                       (self.parameters["MaskBlur"] * 2 + 1, self.parameters["MaskBlur"] * 2 + 1), 0)

        img_mask = np.ones((128, 128), dtype=np.float32)

        start_enhancer_time = time.time()
        # Codeformer
        if self.parameters["GFPGAN"] and self.parameters['Enhancer'] == 'CF':
            swapped_face_upscaled = self.codeformer(swapped_face_upscaled,GFPGANAmount=GFPGANAmount)  # CodeFormer restoration; GFPGANAmount controls the blend

        # Optional enhancement passes.
        # # GFPGAN
        if self.parameters["GFPGAN"] and self.parameters['Enhancer'] == 'GFPGAN':
            swapped_face_upscaled = self.apply_GFPGAN(swapped_face_upscaled)

        #GPEN
        if self.parameters["GFPGAN"] and self.parameters['Enhancer'] == 'GPEN':
            swapped_face_upscaled = self.apply_GPEN_512(swapped_face_upscaled)

        logger.debug(f"脸部高清修复耗时: {time.time() - start_enhancer_time}")
        img_mask = cv2.GaussianBlur(img_mask,
                                    (self.parameters["BlurAmount"] * 2 + 1, self.parameters["BlurAmount"] * 2 + 1), 0)
        img_mask *= border_mask

        img_mask = cv2.resize(img_mask, (512, 512))
        img_mask = np.reshape(img_mask, [img_mask.shape[0], img_mask.shape[1], 1])
        swapped_face_upscaled *= img_mask

        # Warp the masked face back into frame coordinates.
        swapped_face_upscaled = cv2.warpAffine(swapped_face_upscaled, IM512, (img.shape[1], img.shape[0]),
                                               borderValue=0.0)

        # Option 2 - 9.8 ms
        # Rebuild the paste bbox around the nose (kps[2]) using the template
        # nose offsets (56.0252, 71.7366) scaled by kps_ratio.
        kps_scale = 1.42
        bbox[0] = kps[2][0] - kps_ratio * 56.0252 * kps_scale
        bbox[1] = kps[2][1] - kps_ratio * 71.7366 * kps_scale
        bbox[2] = kps[2][0] + kps_ratio * 71.7366 * kps_scale
        bbox[3] = kps[2][1] + kps_ratio * 56.0252 * kps_scale

        # Clamp the bbox to the frame.
        left = floor(bbox[0])
        if left < 0:
            left = 0
        top = floor(bbox[1])
        if top < 0:
            top = 0
        right = ceil(bbox[2])
        if right > img.shape[1]:
            right = img.shape[1]

        bottom = ceil(bbox[3])
        if bottom > img.shape[0]:
            bottom = img.shape[0]

        swapped_face_upscaled = swapped_face_upscaled[top:bottom, left:right, 0:3].astype(np.float32)
        img_a = img[top:bottom, left:right, 0:3].astype(np.float32)

        img_mask = cv2.warpAffine(img_mask, IM512, (img.shape[1], img.shape[0]), borderValue=0.0)
        img_mask = np.reshape(img_mask, [img_mask.shape[0], img_mask.shape[1], 1])
        img_mask = img_mask[top:bottom, left:right, 0:1]

        # Invert: keep the original frame where the face mask is weak.
        img_mask = 1.0 - img_mask

        img_mask = torch.from_numpy(img_mask)
        img_a = torch.from_numpy(img_a)
        swapped_face_upscaled += torch.mul(img_mask, img_a).numpy()
        img[top:bottom, left:right, 0:3] = swapped_face_upscaled
        logger.debug(f"整体耗时: -> {time.time() - start_swap_time}")
        return img.astype(np.uint8)  # return the composited frame (BGR, uint8)




    # @profile
    def ret50_landmarks(self, image):
        """Detect 5-point landmarks with a RetinaFace/ResNet50 ONNX model.

        NOTE(review): this method appears broken/unfinished as written:
        cv2.cvtColor returns a numpy array, which has no ``.permute`` — the
        code seems to expect a CUDA torch tensor input. Also
        ``self.resnet_model`` and ``self.anchors`` are never initialized
        anywhere in this file. Confirm before use.
        """
        image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
        image = image.permute(1,2,0)

        # Subtract the model's BGR channel means.
        # image = image - [104, 117, 123]
        mean = torch.tensor([104, 117, 123], dtype=torch.float32, device='cuda')
        image = torch.sub(image, mean)

        # HWC -> CHW, add batch dim.
        # image = image.transpose(2, 0, 1)
        # image = np.float32(image[np.newaxis,:,:,:])
        image = image.permute(2,0,1)
        image = torch.reshape(image, (1, 3, 512, 512))


        height, width = (512, 512)
        # Scale vector to map normalized landmark coords back to pixels.
        tmp = [width, height, width, height, width, height, width, height, width, height]
        scale1 = torch.tensor(tmp, dtype=torch.float32, device='cuda')

        # Pre-allocate CUDA output buffers for zero-copy IO binding.
        # ort_inputs = {"input": image}
        conf = torch.empty((1,10752,2), dtype=torch.float32, device="cuda").contiguous()
        landmarks = torch.empty((1,10752,10), dtype=torch.float32, device="cuda").contiguous()

        io_binding = self.resnet_model.io_binding()
        io_binding.bind_input(name='input', device_type='cuda', device_id=0, element_type=np.float32, shape=(1,3,512,512), buffer_ptr=image.data_ptr())
        io_binding.bind_output(name='conf', device_type='cuda', device_id=0, element_type=np.float32, shape=(1,10752,2), buffer_ptr=conf.data_ptr())
        io_binding.bind_output(name='landmarks', device_type='cuda', device_id=0, element_type=np.float32, shape=(1,10752,10), buffer_ptr=landmarks.data_ptr())

        # _, conf, landmarks = self.resnet_model.run(None, ort_inputs)
        torch.cuda.synchronize('cuda')
        self.resnet_model.run_with_iobinding(io_binding)


        # Confidence of the "face" class for each prior.
        # conf = torch.from_numpy(conf)
        # scores = conf.squeeze(0).numpy()[:, 1]
        scores = torch.squeeze(conf)[:, 1]

        # landmarks = torch.from_numpy(landmarks)
        # landmarks = landmarks.to('cuda')

        priors = torch.tensor(self.anchors).view(-1, 4)
        priors = priors.to('cuda')

        # Decode landmark regressions relative to the priors (variance 0.1).
        # pre = landmarks.squeeze(0)
        pre = torch.squeeze(landmarks, 0)

        tmp = (priors[:, :2] + pre[:, :2] * 0.1 * priors[:, 2:], priors[:, :2] + pre[:, 2:4] * 0.1 * priors[:, 2:], priors[:, :2] + pre[:, 4:6] * 0.1 * priors[:, 2:], priors[:, :2] + pre[:, 6:8] * 0.1 * priors[:, 2:], priors[:, :2] + pre[:, 8:10] * 0.1 * priors[:, 2:])
        landmarks = torch.cat(tmp, dim=1)
        # landmarks = landmarks * scale1
        landmarks = torch.mul(landmarks, scale1)

        landmarks = landmarks.cpu().numpy()

        # ignore low scores
        # inds = np.where(scores > 0.97)[0]
        inds = torch.where(scores>0.97)[0]
        inds = inds.cpu().numpy()
        scores = scores.cpu().numpy()

        landmarks, scores = landmarks[inds], scores[inds]

        # sort
        # Keep the landmarks of the highest-scoring detection only.
        order = scores.argsort()[::-1]
        landmarks = landmarks[order][0]

        # Reshape the flat 10-vector into five (x, y) points.
        return np.array([[landmarks[i], landmarks[i + 1]] for i in range(0,10,2)])


    # Not working yet (original author's note)
    def apply_GPEN_512(self, swapped_face_upscaled,GFPGANAmount=100):
        """Run GPEN-BFR-512 restoration on an aligned face.

        NOTE(review): depends on ``self.FFHQ_kps``, ``self.syncvec`` and
        (via ret50_landmarks) ``self.resnet_model`` / ``self.anchors``, none
        of which are initialized in this file — consistent with the "not
        working yet" note above.
        """
        from torchvision.transforms import v2
        from skimage import transform as trans
        # Set up Transformation
        dst = self.arcface_dst * 4.0
        dst[:,0] += 32.0
        tform = trans.SimilarityTransform()

        # if self.is_image_loaded:
            # try:
        dst = self.ret50_landmarks(swapped_face_upscaled)
        #     except:
        #         return swapped_face_upscaled

        tform.estimate(dst, self.FFHQ_kps)

        # Transform, scale, and normalize
        # 57.2958 converts radians to degrees for v2.functional.affine.
        temp = v2.functional.affine(swapped_face_upscaled, tform.rotation*57.2958, (tform.translation[0], tform.translation[1]) , tform.scale, 0, center = (0,0) )
        temp = v2.functional.crop(temp, 0,0, 512, 512)
        temp = torch.div(temp, 255)
        temp = v2.functional.normalize(temp, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=False)
        temp = torch.unsqueeze(temp, 0)

        # Bindings
        outpred = torch.empty((1,3,512,512), dtype=torch.float32, device="cuda").contiguous()
        io_binding = self.GPEN_512_model.io_binding()
        io_binding.bind_input(name='input', device_type='cuda', device_id=0, element_type=np.float32, shape=(1,3,512,512), buffer_ptr=temp.data_ptr())
        io_binding.bind_output(name='output', device_type='cuda', device_id=0, element_type=np.float32, shape=(1,3,512,512), buffer_ptr=outpred.data_ptr())

        # Sync and run model
        syncvec = self.syncvec.cpu()
        self.GPEN_512_model.run_with_iobinding(io_binding)

        # Format back to cxHxW @ 255
        outpred = torch.squeeze(outpred)
        outpred = torch.clamp(outpred, -1, 1)
        outpred = torch.add(outpred, 1)
        outpred = torch.div(outpred, 2)
        outpred = torch.mul(outpred, 255)

        # Invert Transform
        outpred = v2.functional.affine(outpred, tform.inverse.rotation*57.2958, (tform.inverse.translation[0], tform.inverse.translation[1]) , tform.
        inverse.scale, 0, interpolation=v2.InterpolationMode.BILINEAR, center = (0,0) )

        # Blend
        alpha = GFPGANAmount/100.0
        outpred = torch.add(torch.mul(outpred, alpha), torch.mul(swapped_face_upscaled, 1-alpha))

        return outpred



    # Build face pairs — core logic for multi-person swapping.
    def preprocess_source_images(self,data):
        """
        Preprocess user-supplied data into (target_face, source_face) vector pairs.

        The mean embedding of each group of source images is used as the
        source-face vector.
        TODO (original note): split this function further? pass target_faces in
        as a parameter?
        """
        target_image = read_image(data["target_image"])
        target_faces = self.从图片获取人脸数据(target_image, max_num=8)
        # (target, source) embedding pairs
        face_pairs = []
        for source_image in data["source_images"]:
            target_faces_index = source_image["index"]
            source_face_vectors = []

            for face_url in source_image["face_image_urls"]:
                source_face_data = self.从图片获取人脸数据(read_image(face_url), max_num=1)
                if source_face_data:
                    source_face_vector = source_face_data[0].embedding
                    source_face_vectors.append(source_face_vector)
            # Only pair up when at least one source face was found.
            # NOTE(review): target_faces[target_faces_index] can raise
            # IndexError when the client-supplied index is out of range.
            if source_face_vectors:
                # Mean embedding over this group of source images.
                source_face_mean_vector = 向量均值(source_face_vectors)
                target_face_vector = target_faces[target_faces_index].embedding
                    # face_pairs.append((target_face_vector, source_face_mean_vector))  # needs list format
                face_pairs.append((target_face_vector.tolist(), source_face_mean_vector.tolist()))
                # (handles the case where extra face images were uploaded)

        return face_pairs


    def 从视频中获取最佳人脸数(self,video_url,oss_name="ali"):
        """Extract frames from a video, find the best faces, upload, return URLs.

        NOTE(review): if the import or UUID line raises before temp_dir is
        assigned, the finally block hits a NameError; shutil.rmtree would also
        be safer than `os.system("rm -rf ...")` here.
        """
        from config import kSourceVideosData
        try:
            temp_dir = kSourceVideosData + f"/{generate_uuid_string()}"
            # Create a temporary working folder.
            os.makedirs(temp_dir, exist_ok=True)
            extract_frames(video_url, temp_dir)
            face_data = many_face_fenxi.process_all_frames_in_folder(temp_dir, self.从图片获取人脸数据)
            logger.debug(f"一共获取到{len(face_data)}个人脸") # may be empty when no faces are found
            urls = many_face_fenxi.start(face_data,oss_name)
        finally:
            # Remove the temp folder if it exists.
            if os.path.exists(temp_dir):
                os.system(f"rm -rf {temp_dir}")

        return urls


    def preprocess_source_images_v2(self, data):
        """
        Build (target, source) embedding pairs from explicit face-URL pairs.

        Example request payload:
        requests_data = {
            # target_video is generally unused here
            "target_video":"https://ty-aihuanlian.oss-cn-shanghai.aliyuncs.com/test/RPReplay_Final1699878287.mov",
            "face_data":[
                {
                    "target_face_url":"http://ty-huanlian.oss-cn-shanghai.aliyuncs.com/swap_face%2F202311%2Fbest_face_0_1700020151_output_frame_4.jpg",
                    "user_face_urls":["http://usfile.chaotuapp.com/uploads/android/user/1699935515321.jpg"]
                },
                {
                    "target_face_url":"http://ty-huanlian.oss-cn-shanghai.aliyuncs.com/swap_face%2F202311%2Fbest_face_1_1700020151_output_frame_6.jpg",
                    "user_face_urls":["https://ty-aihuanlian.oss-cn-shanghai.aliyuncs.com/test/IMG_2760.jpg"]
                }

            ]
        }
        """

        # (target, source) embedding pairs
        face_pairs = []

        for item in data:
            target_face_url = item['target_face_url']
            user_face_urls = item['user_face_urls']

            # Fetch face data from the target-face URL.
            target_face_data = self.从图片获取人脸数据(read_image(target_face_url), max_num=1)
            if not target_face_data:
                print (target_face_url,"没有人脸")
                continue
            target_face_vector = target_face_data[0].embedding

            # Process the user face URLs.
            source_face_vectors = []
            for face_url in user_face_urls:
                user_face_data = self.从图片获取人脸数据(read_image(face_url), max_num=1)
                if user_face_data:
                    user_face_vector = user_face_data[0].embedding
                    source_face_vectors.append(user_face_vector)

            # Mean embedding of the user's face images.
            if source_face_vectors:
                source_face_mean_vector = 向量均值(source_face_vectors)
                face_pairs.append((target_face_vector.tolist(), source_face_mean_vector.tolist()))

        return face_pairs


if __name__ == '__main__':
    # Smoke test: verify the face-analysis model loads.
    load_faceapp_model()
