import cv2
import numpy as np
import mediapipe as mp
from rembg import remove
from PIL import Image  # ✅ 补充导入

# Initialize the MediaPipe human-pose solution module (used by detect_body).
mp_pose = mp.solutions.pose


def detect_body(image):
    """Run MediaPipe pose estimation on a BGR image.

    Args:
        image: BGR image array (as loaded by cv2.imread).

    Returns:
        The detected pose landmarks, or None when no person is found.
    """
    # MediaPipe expects RGB input, so convert from OpenCV's BGR order first.
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    with mp_pose.Pose(static_image_mode=True, min_detection_confidence=0.7) as pose:
        return pose.process(rgb).pose_landmarks


def prepare_cloth(cloth_path):
    """Load a clothing template and strip its background.

    Args:
        cloth_path: Path to the clothing image file.

    Returns:
        Tuple of (RGBA numpy array with background removed, width, height).
    """
    # Force RGBA so an alpha channel exists for the matting result.
    with Image.open(cloth_path) as img:
        rgba = img.convert("RGBA")
    matted = remove(rgba)  # rembg: background pixels become transparent
    pixels = np.array(matted)
    height, width = pixels.shape[:2]
    return pixels, width, height


def warp_cloth(src_points, dst_points, cloth_img, target_size):
    """Affine-warp the clothing image so src_points land on dst_points.

    Args:
        src_points: np.float32 control points on the cloth image.
        dst_points: np.float32 matching points on the target image.
        cloth_img: Clothing image (RGBA preferred; 3-channel/grayscale also accepted).
        target_size: (width, height) of the output canvas.

    Returns:
        The warped image as an RGBA (4-channel) uint8 array of target_size.

    Raises:
        ValueError: If no affine transform can be estimated from the points.
    """
    # Estimate a partial affine (rotation/uniform-scale/translation) transform.
    matrix, _ = cv2.estimateAffinePartial2D(src_points, dst_points)
    if matrix is None:
        # Degenerate point sets (duplicated/collinear) yield no transform;
        # fail with a clear message instead of letting warpAffine crash.
        raise ValueError("无法从给定关键点估计仿射变换矩阵")

    warped = cv2.warpAffine(
        cloth_img,
        matrix,
        (target_size[0], target_size[1]),
        flags=cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_CONSTANT,
        borderValue=(0, 0, 0, 0)  # transparent background fill
    )

    # Normalize the output to 4-channel RGBA for downstream alpha blending.
    if warped.ndim == 2:
        # Grayscale input: expand to 3 channels before adding alpha.
        warped = cv2.cvtColor(warped, cv2.COLOR_GRAY2BGR)
    if warped.shape[2] == 3:
        alpha = np.full((warped.shape[0], warped.shape[1], 1), 255, dtype=np.uint8)
        warped = np.concatenate([warped, alpha], axis=2)

    return warped


def blend_images(target_img, cloth_img):
    """Alpha-blend an RGBA clothing layer onto a BGR target image.

    Args:
        target_img: BGR uint8 image (as loaded by cv2.imread).
        cloth_img: RGBA uint8 image; the alpha channel controls opacity.

    Returns:
        The blended BGR uint8 image, same size as target_img.

    Raises:
        ValueError: If cloth_img lacks an alpha channel.
    """
    if cloth_img.shape[2] != 4:
        raise ValueError("cloth_img 必须是带 alpha 通道的 RGBA 图像")

    target = target_img.astype(np.float32)
    # Bug fix: the cloth comes from PIL/rembg in RGB order while the target
    # is BGR from cv2 — reverse the channel axis so colors line up.
    cloth_bgr = cloth_img[:, :, :3][:, :, ::-1].astype(np.float32)
    alpha = cloth_img[:, :, 3:4].astype(np.float32) / 255.0

    # Standard "over" compositing; a convex combination stays within [0, 255].
    blended = target * (1.0 - alpha) + cloth_bgr * alpha
    return blended.astype(np.uint8)

def get_src_points(cloth_w, cloth_h, scale=0.5, vertical_offset=0.0, horizontal_offset=0.0):
    """Build the three source control points on the clothing image.

    Args:
        cloth_w: Clothing image width in pixels.
        cloth_h: Clothing image height in pixels.
        scale: Horizontal shoulder span as a fraction of width (0.0-1.0).
        vertical_offset: Downward shift as a fraction of image height.
        horizontal_offset: Sideways shift as a fraction of width (positive = right).

    Returns:
        np.float32 array of [left shoulder, right shoulder, chest center].
    """
    half_span = scale / 2
    shoulder_y = 0.2 + vertical_offset
    chest_y = 0.6 + vertical_offset
    left_x = 0.5 - half_span + horizontal_offset
    right_x = 0.5 + half_span + horizontal_offset
    # The chest anchor shifts sideways with the same horizontal offset.
    chest_x = cloth_w * 0.5 + cloth_w * horizontal_offset

    points = [
        (cloth_w * left_x, cloth_h * shoulder_y),   # left shoulder
        (cloth_w * right_x, cloth_h * shoulder_y),  # right shoulder
        (chest_x, cloth_h * chest_y),               # chest center
    ]
    return np.float32(points)




def virtual_tryon(target_path, cloth_path):
    """Composite a clothing image onto a person photo.

    Args:
        target_path: Path to the person (target) image.
        cloth_path: Path to the clothing template image.

    Returns:
        The blended BGR result image. Also writes debug_keypoints.jpg as a
        side effect showing the detected anchor points.

    Raises:
        FileNotFoundError: If the target image cannot be read.
        ValueError: If no body landmarks are detected.
    """
    # Load the target photo (BGR).
    target = cv2.imread(target_path)
    if target is None:
        raise FileNotFoundError(f"无法读取目标图像：{target_path}")

    target_h, target_w = target.shape[:2]

    # Preprocess the clothing image (includes background removal).
    cloth, cloth_w, cloth_h = prepare_cloth(cloth_path)
    # Flip the cloth vertically; copy into a contiguous buffer because
    # OpenCV rejects the negative-stride view that [::-1] produces.
    cloth = np.ascontiguousarray(cloth[::-1, ...])

    # Detect body landmarks.
    landmarks = detect_body(target)
    if landmarks is None:
        raise ValueError("未检测到人体关键点")

    # Source control points on the cloth (controls apparent garment width/placement).
    src_points = get_src_points(cloth_w, cloth_h, scale=0.9, vertical_offset=0.11, horizontal_offset=-0.01)

    # Named landmark indices instead of magic numbers 11/12.
    left_shoulder = landmarks.landmark[mp_pose.PoseLandmark.LEFT_SHOULDER]
    right_shoulder = landmarks.landmark[mp_pose.PoseLandmark.RIGHT_SHOULDER]

    # Shoulder midpoint serves as the chest anchor.
    center_x = (left_shoulder.x + right_shoulder.x) / 2
    center_y = (left_shoulder.y + right_shoulder.y) / 2

    # Landmark coordinates are normalized [0, 1]; scale to pixel space.
    dst_points = np.float32([
        [left_shoulder.x * target_w, left_shoulder.y * target_h],     # left shoulder
        [right_shoulder.x * target_w, right_shoulder.y * target_h],   # right shoulder
        [center_x * target_w, center_y * target_h]                    # chest center
    ])

    # Optional debug output: mark the destination keypoints in green.
    debug_img = target.copy()
    for pt in dst_points:
        cv2.circle(debug_img, (int(pt[0]), int(pt[1])), 5, (0, 255, 0), -1)
    cv2.imwrite("debug_keypoints.jpg", debug_img)

    # Warp the cloth into the person's coordinate frame.
    warped_cloth = warp_cloth(src_points, dst_points, cloth, (target_w, target_h))

    # Alpha-blend the warped cloth over the target photo.
    result = blend_images(target, warped_cloth)
    return result




# Usage example
if __name__ == "__main__":
    try:
        result = virtual_tryon("person.jpg", "tshirt.jpg")
        cv2.imwrite("result.jpg", result)
        print("换装成功！结果保存为 result.jpg")
    except Exception as e:
        # Top-level script boundary: report any failure instead of a raw traceback.
        print(f"错误发生：{str(e)}")
