import os
import io
import cv2
import dlib
import numpy as np
from PIL import Image

# Initialize the dlib models: frontal face detector + 68-point landmark
# predictor.  NOTE(review): the predictor loads its model file from the
# current working directory — confirm the .dat file ships with the script.
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# Path configuration
source_folder = "source"
original_target_folder = "origin_target"  # directory of the original target images
processed_target_folder = "pro_target"  # directory of the preprocessed target images
output_folder = "final_result"
os.makedirs(output_folder, exist_ok=True)

def rotate_image(image, landmarks):
    """Rotate *image* so the line between the outer eye corners is horizontal.

    Parameters
    ----------
    image : np.ndarray
        BGRA image to align.
    landmarks : dlib full_object_detection
        68-point landmarks; indices 36 and 45 are the two outer eye corners.

    Returns
    -------
    np.ndarray
        Rotated image of the same size; uncovered borders are filled with
        transparent black.
    """
    corner_a = landmarks.part(36)
    corner_b = landmarks.part(45)

    # Angle of the inter-ocular line relative to the horizontal axis.
    angle = np.degrees(np.arctan2(corner_b.y - corner_a.y, corner_b.x - corner_a.x))

    # Rotate about the midpoint between the two eye corners.
    pivot = ((corner_a.x + corner_b.x) // 2, (corner_a.y + corner_b.y) // 2)
    matrix = cv2.getRotationMatrix2D(pivot, angle, 1.0)

    height, width = image.shape[:2]
    return cv2.warpAffine(
        image, matrix, (width, height),
        flags=cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_CONSTANT,
        borderValue=(0, 0, 0, 0),
    )

def extract_face_mask(image, landmarks, filename):
    """Extract the face region (brows, eyes, nose, mouth, jawline) as BGRA.

    A debug crop (original pixels inside the face hull, black elsewhere)
    is also written to the ``face_only`` directory.

    Parameters
    ----------
    image : np.ndarray
        BGRA image containing the face described by *landmarks*.
    landmarks : dlib full_object_detection
        68-point landmarks detected on *image*.
    filename : str
        File name used when saving the debug crop.

    Returns
    -------
    np.ndarray
        (H, W, 4) BGRA image whose alpha channel is the filled face hull.
    """
    # Dedicated directory for debug crops.  Deliberately NOT named
    # ``output_folder``: the original shadowed the module-level constant.
    face_only_dir = "face_only"
    os.makedirs(face_only_dir, exist_ok=True)

    # Empty single-channel mask, same height/width as the input.
    mask = np.zeros(image.shape[:2], dtype=np.uint8)

    # Landmark indices covering the face.  NOTE: since a convex hull is
    # taken below, the interior points (eyes, nose, mouth) cannot enlarge
    # the outline — the jaw + brow points (0-26) determine the hull.  Ears
    # are NOT part of the 68-point model and are never included.
    face_contour = list(range(0, 27)) + [  # jawline + eyebrows + nose bridge
        28, 29, 30, 31, 33, 35,  # lower nose
        36, 37, 38, 39, 40, 41,  # eye contour (points 36-41)
        42, 43, 44, 45, 46, 47,  # eye contour (points 42-47)
        48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60  # mouth outline
    ]

    points = np.array([(landmarks.part(i).x, landmarks.part(i).y) for i in face_contour], dtype=np.int32)

    # Convex hull guarantees one closed outline; fill it as the mask.
    hull = cv2.convexHull(points)
    cv2.fillPoly(mask, [hull], 255)

    # BGRA output: original colors, face hull as the alpha channel.
    output = np.zeros((image.shape[0], image.shape[1], 4), dtype=np.uint8)
    output[:, :, :3] = image[:, :, :3]
    output[:, :, 3] = mask

    # Debug crop: keep only the pixels inside the hull.
    face_only = cv2.bitwise_and(image, image, mask=mask)
    output_path = os.path.join(face_only_dir, filename)
    cv2.imwrite(output_path, face_only)
    print(f"✅ 提取人脸部分并保存: {output_path}")

    return output


def get_face_landmarks(image):
    """Detect the first face in a BGR *image* and return its 68 landmarks.

    Returns ``None`` when no face is found.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    detections = detector(gray)
    return predictor(gray, detections[0]) if detections else None


def get_eye_distance(landmarks):
    """Return the Euclidean pixel distance between the outer eye corners.

    Uses dlib 68-point indices 36 and 45 (the two outer eye corners).
    """
    dx = landmarks.part(36).x - landmarks.part(45).x
    dy = landmarks.part(36).y - landmarks.part(45).y
    return np.linalg.norm([dx, dy])

def process_source_face(source_path):
    """Load a source image, align it so the eyes are level, and cut out the face.

    Parameters
    ----------
    source_path : str
        Path to the source image file.

    Returns
    -------
    tuple | None
        ``(face_bgra, landmarks)`` for the rotated image on success;
        ``None`` when no face is detected before or after alignment.
    """
    # Decode through PIL so any alpha channel survives, then hand to OpenCV.
    with open(source_path, "rb") as f:
        raw_bytes = f.read()
    pil_image = Image.open(io.BytesIO(raw_bytes)).convert("RGBA")
    cv_image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGBA2BGRA)

    # First detection pass on the unaligned image.
    gray = cv2.cvtColor(cv_image, cv2.COLOR_BGRA2GRAY)
    detections = detector(gray)
    if not detections:
        print(f"⚠️ 未检测到人脸: {source_path}")
        return None
    landmarks = predictor(gray, detections[0])

    # Align the image (rotate so the eye line is horizontal).
    rotated = rotate_image(cv_image, landmarks)

    # Landmarks must be recomputed on the rotated image.
    gray_rotated = cv2.cvtColor(rotated, cv2.COLOR_BGRA2GRAY)
    detections_rotated = detector(gray_rotated)
    if not detections_rotated:
        print(f"⚠️ 旋转后未检测到人脸: {source_path}")
        return None
    landmarks_rotated = predictor(gray_rotated, detections_rotated[0])

    # Extract the face region, saved as face_only_<source file name>.
    face_region = extract_face_mask(
        rotated, landmarks_rotated, f"face_only_{os.path.basename(source_path)}"
    )
    return face_region, landmarks_rotated

def enhanced_seamless_clone(original_target_path, processed_target_path, src_face, src_landmarks):
    """Blend *src_face* onto a target image with Poisson seamless cloning.

    The face position is detected on the ORIGINAL target image, while the
    blend itself is performed on the PREPROCESSED target image.  No color
    correction is applied; the source face keeps its original colors.

    Parameters
    ----------
    original_target_path : str
        Path of the original target image (used only for face detection).
    processed_target_path : str
        Path of the preprocessed target image (blend destination).
    src_face : np.ndarray
        (H, W, 4) BGRA source face whose alpha channel is the face mask.
    src_landmarks : dlib full_object_detection
        Landmarks of the source face, used for scale estimation.

    Returns
    -------
    np.ndarray | None
        The blended BGR image, or ``None`` on any detection/read failure.
    """
    # Read the original target; imread returns None on failure, which
    # would crash inside get_face_landmarks — guard it (the processed
    # image below already had this check).
    original_target = cv2.imread(original_target_path)
    if original_target is None:
        print(f"⚠️ 无法读取原始目标图像: {original_target_path}")
        return None
    tgt_landmarks = get_face_landmarks(original_target)
    if tgt_landmarks is None:
        print(f"⚠️ 原始目标图未检测到人脸: {original_target_path}")
        return None

    # Read the preprocessed target image.
    processed_target = cv2.imread(processed_target_path)
    if processed_target is None:
        print(f"⚠️ 无法读取预处理图像: {processed_target_path}")
        return None

    # Both targets must agree in size (compare height/width only).
    if original_target.shape[:2] != processed_target.shape[:2]:
        print(f"⚠️ 图像尺寸不一致: {original_target_path}")
        return None

    # Scale the source face so its eye distance matches the target's.
    src_eye_dist = get_eye_distance(src_landmarks)
    tgt_eye_dist = get_eye_distance(tgt_landmarks)
    scale_factor = tgt_eye_dist / src_eye_dist

    resized_face = cv2.resize(
        src_face,
        (int(src_face.shape[1] * scale_factor),
         int(src_face.shape[0] * scale_factor))
    )

    # Clone center: the nose tip (landmark 30) in the original target.
    tgt_center = (tgt_landmarks.part(30).x, tgt_landmarks.part(30).y)

    # Build the blend mask: binarize the alpha channel, then feather its
    # edges with a Gaussian blur and re-stretch to the full 0-255 range.
    alpha_mask = resized_face[:, :, 3].copy()
    alpha_mask[alpha_mask > 0] = 255
    face_alpha = cv2.GaussianBlur(alpha_mask, (35, 35), 0)
    face_alpha = cv2.normalize(face_alpha, None, 0, 255, cv2.NORM_MINMAX)

    # Poisson seamless clone using the source face's original colors.
    result = cv2.seamlessClone(
        resized_face[:, :, :3],
        processed_target,
        face_alpha,
        tgt_center,
        cv2.NORMAL_CLONE
    )
    return result

# Main pipeline: blend every source face onto every target image.
# NOTE(review): this runs at import time — consider wrapping in
# ``if __name__ == "__main__":`` if the module is ever imported.
source_files = [f for f in os.listdir(source_folder) if f.lower().endswith(('.png', '.jpg', '.jpeg'))]
original_target_files = [f for f in os.listdir(original_target_folder) if f.lower().endswith(('.png', '.jpg', '.jpeg'))]

for src_file in source_files:
    src_path = os.path.join(source_folder, src_file)
    src_data = process_source_face(src_path)
    if not src_data:
        continue

    face_img, src_landmarks = src_data

    for tgt_file in original_target_files:
        # Build the pair of paths for this target image.
        original_tgt_path = os.path.join(original_target_folder, tgt_file)
        processed_tgt_path = os.path.join(processed_target_folder, tgt_file)  # assumes same file name in both dirs

        if not os.path.exists(processed_tgt_path):
            print(f"⚠️ 缺少预处理图像: {tgt_file}")
            continue

        # Run the enhanced blend (detection on original, clone on processed).
        output = enhanced_seamless_clone(
            original_tgt_path,
            processed_tgt_path,
            face_img,
            src_landmarks
        )

        if output is not None:
            output_name = f"enhanced_{os.path.splitext(src_file)[0]}_on_{os.path.splitext(tgt_file)[0]}.png"
            cv2.imwrite(os.path.join(output_folder, output_name), output)
            print(f"✅ 增强融合完成: {output_name}")