import os
from unittest import result
import cv2
import torch
from PIL import Image
import torch.nn.functional as F
from torchvision import transforms
from SimSwap_main.parsing_model.model import BiSeNet
from SimSwap_main.test_wholeimage_swapspecific import _totensor
from SimSwap_main.util.add_watermark import watermark_image
from SimSwap_main.util.reverse2original import reverse2wholeimage
from util.norm import SpecificNorm
from models.models import create_model
from options.test_options import TestOptions
import numpy as np
from models.fs_model import fsModel
from insightface_func.face_detect_crop_multi import Face_detect_crop


# Two chained torchvision transforms; Compose makes them a single callable.
# Generic preprocessing: PIL image / ndarray -> tensor, then map [0, 1] -> [-1, 1].
transformer = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])

# ArcFace preprocessing for inputs that are ALREADY tensors (hence no ToTensor
# step): standard ImageNet mean/std normalization.
tensor_transformer_Arcface = transforms.Compose([
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

class Simswapencoder(fsModel):
    """SimSwap wrapper model.

    Encodes an identity face with ArcFace and swaps it either onto a batch of
    attribute-image tensors ("ATT" mode) or onto a specific person detected in
    a whole photo on disk ("SP" mode).
    """

    def __init__(self):
        super(Simswapencoder, self).__init__()
        self.opt = TestOptions().parse()
        self.model = create_model(self.opt)
        self.model.eval()

        # BUGFIX: the old code called self.gen_latend_id() here, but
        # self.img_id is only assigned inside inference(), so construction
        # always crashed with AttributeError. Defer the latent computation
        # until inference() has loaded the identity image.
        self.img_id = None
        self.latend_id = None
        print("model [%s] was created" % (self.name()))

    def init_img_id(self, id):
        """Normalize an identity face tensor for ArcFace and move it to the GPU.

        Args:
            id: identity face as a (C, H, W) tensor, values assumed in [0, 1]
                -- TODO(review): confirm the caller's value range.

        Returns:
            A (1, C, H, W) CUDA tensor normalized with ImageNet mean/std.
        """
        img_a = tensor_transformer_Arcface(id)  # ImageNet mean/std normalization
        # Reshape (C, H, W) -> (1, C, H, W) so it batches through the networks.
        img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
        return img_id.cuda()

    def gen_latend_id(self):
        """Extract the ArcFace identity embedding of ``self.img_id``.

        ``self.img_id`` must already be set (inference() does this). The image
        is downsampled to ArcFace's fixed 112x112 input size first.
        """
        img_id_downsample = F.interpolate(self.img_id, size=(112, 112))
        return self.netArc(img_id_downsample)

    def inference(self, id, spc, type="ATT"):  # type: ignore
        """Run a face swap.

        Args:
            id:   identity face tensor; see init_img_id().
            spc:  in "ATT" mode, a (B, C, H, W) CUDA attribute-image tensor;
                  in "SP" mode, the file path of the photo containing the
                  person to be swapped.
            type: "ATT" or "SP". (Parameter name kept for caller compatibility
                  even though it shadows the builtin.)

        Returns:
            "ATT": a float CUDA tensor with the swapped faces concatenated
            along the width axis.
            "SP": whatever reverse2wholeimage() returns -- NOTE(review): the
            upstream implementation writes the result image to disk and
            returns None; verify before relying on this value.

        Raises:
            ValueError: if ``type`` is neither "ATT" nor "SP" (the old code
            fell through to a NameError on ``out``).
        """
        opt = self.opt
        logoclass = watermark_image('./simswaplogo/simswaplogo.png')

        # The person who provides the identity information.
        self.img_id = self.init_img_id(id)
        # Identity latent, computed only now that img_id exists.
        self.latend_id = self.gen_latend_id()

        if type == "ATT":
            img_att = spc
            img_id = self.img_id

            # L2-normalize the ArcFace embedding on the CPU, then move back.
            # BUGFIX: the old code called `model.netArc` where `model` was an
            # undefined global; the embedding comes from self.netArc instead.
            latend_id = self.latend_id.detach().to('cpu')
            latend_id = latend_id / np.linalg.norm(latend_id, axis=1, keepdims=True)
            latend_id = latend_id.to('cuda')

            # Generate: (ID image, ATT image, source latent, target latent,
            # return-intermediate-features flag).
            img_fake = self(img_id, img_att, latend_id, latend_id, True)

            # Concatenate the batch results along the width axis.
            # BUGFIX: the old `if range(img_id.shape[0]):` guard is truthy for
            # every non-empty batch, so its else-branch was dead code; a plain
            # loop is equivalent.
            result = img_fake[0]
            for i in range(1, img_id.shape[0]):
                result = torch.cat([result, img_fake[i]], dim=2)

            # BUGFIX: `result` is already a torch tensor, so the old
            # torch.from_numpy(result) raised TypeError. Cast and move instead.
            out = result.float().to('cuda')

        elif type == "SP":
            crop_size = opt.crop_size

            mse = torch.nn.MSELoss().cuda()
            spNorm = SpecificNorm()

            # Face detector / aligner.
            app = Face_detect_crop(name='antelope', root='./insightface_func/models')
            app.prepare(ctx_id=0, det_thresh=0.6, det_size=(640, 640))

            # The specific person to be swapped (a file path in this mode).
            pic_specific = spc
            specific_person_whole = cv2.imread(pic_specific)
            specific_person_align_crop, _ = app.get(specific_person_whole, crop_size)
            specific_person_align_crop_pil = Image.fromarray(
                cv2.cvtColor(specific_person_align_crop[0], cv2.COLOR_BGR2RGB))
            # BUGFIX: the old code used the undefined name `transformer_Arcface`;
            # the PIL crop must first become a tensor, then be ImageNet-normalized.
            specific_person = tensor_transformer_Arcface(
                transforms.ToTensor()(specific_person_align_crop_pil))
            specific_person = specific_person.view(
                -1, specific_person.shape[0],
                specific_person.shape[1], specific_person.shape[2])
            specific_person = specific_person.cuda()

            # Un-normalized ArcFace embedding of the specific person, used only
            # for nearest-identity matching below.
            specific_person_downsample = F.interpolate(specific_person, size=(112, 112))
            specific_person_id_nonorm = self.netArc(specific_person_downsample)

            ############## Forward Pass ######################
            pic_b = opt.pic_b_path
            img_b_whole = cv2.imread(pic_b)
            img_b_align_crop_list, b_mat_list = app.get(img_b_whole, crop_size)

            id_compare_values = []
            b_align_crop_tenor_list = []
            for b_align_crop in img_b_align_crop_list:
                # To tensor (RGB), face-normalized, downsampled to ArcFace's
                # 112x112 input, then embedded.
                b_align_crop_tenor = _totensor(
                    cv2.cvtColor(b_align_crop, cv2.COLOR_BGR2RGB))[None, ...].cuda()
                b_align_crop_tenor_arcnorm = spNorm(b_align_crop_tenor)
                b_align_crop_tenor_arcnorm_downsample = F.interpolate(
                    b_align_crop_tenor_arcnorm, size=(112, 112))
                b_align_crop_id_nonorm = self.netArc(b_align_crop_tenor_arcnorm_downsample)

                # MSE distance to the specific person's embedding.
                id_compare_values.append(
                    mse(b_align_crop_id_nonorm,
                        specific_person_id_nonorm).detach().cpu().numpy())
                # Keep the raw (un-normalized) face tensor for the swap itself.
                b_align_crop_tenor_list.append(b_align_crop_tenor)

            # Index of the face in the photo whose identity is closest to the
            # target person.
            min_index = np.argmin(np.array(id_compare_values))

            # Optional parsing mask for nicer blending around face boundaries.
            if opt.use_mask:
                n_classes = 19
                net = BiSeNet(n_classes=n_classes)
                net.cuda()
                save_pth = os.path.join('./parsing_model/checkpoint', '79999_iter.pth')
                net.load_state_dict(torch.load(save_pth))
                net.eval()
            else:
                net = None

            # BUGFIX: the old code referenced an undefined local `latend_id`
            # here and always swapped face [0]; use the L2-normalized source
            # identity latent and swap the MATCHED face instead.
            latend_id = F.normalize(self.latend_id, p=2, dim=1)
            swap_result = self(None, b_align_crop_tenor_list[min_index],
                               latend_id, None, True)[0]

            out = reverse2wholeimage(
                [b_align_crop_tenor_list[min_index]],
                [swap_result],
                [b_mat_list[min_index]],
                crop_size,
                img_b_whole,
                logoclass,
                os.path.join(opt.output_path, 'result_whole_swapspecific.jpg'),
                True,  # no-watermark flag
                pasring_model=net, use_mask=opt.use_mask,  # upstream API misspells "parsing"
                norm=spNorm)

        else:
            raise ValueError("type must be 'ATT' or 'SP', got %r" % (type,))

        return out  # type: ignore