import os

# os.environ['CUDA_VISIBLE_DEVICES']='2'
import util_for_huggingface

# from segment_anything import build_sam, SamPredictor 
import os,pdb,torch,cv2
import numpy as np
from PIL import Image
import torch
from util_flux import process_img_1024,pad_image
from transformers.image_utils import load_image



'''
efficientsam_s.pth   mobile_sam.pth        sam_vit_h_4b8939.pth
efficientsam_ti.pth  sam_vit_b_01ec64.pth  sam_vit_l_0b3195.pth
'''
# SAM_CKP = '/home/shengjie/ckp/segment-anything/checkpoints/sam_vit_h_4b8939.pth'
# facebook/sam2-hiera-large
SAM_NAME = '/mnt/nas/shengjie/huggingface_model_local/sam2-hiera-large/'  # local snapshot of facebook/sam2-hiera-large; only referenced in commented-out code below
examples_dir = '/data/shengjie/style_zhenzhi/'  # NOTE(review): unused in this file — presumably consumed elsewhere
save_dir = '/data/shengjie/synthesis_zhenzhi/'  # NOTE(review): unused in this file — presumably consumed elsewhere

# Model initialization
# sam_checkpoint = "sam_vit_h_4b8939.pth"  # checkpoint path
model_type = "vit_h"  # SAM v1 model type (vit_h/vit_l/vit_b); NOTE(review): unused by the SAM2 code below — confirm
device = "cuda" if torch.cuda.is_available() else "cpu"  # pick GPU automatically when available

######### SAM helper utilities ###########
# NOTE(review): numpy/torch/cv2 are re-imported here (already imported above); harmless but redundant.
import numpy as np
import torch
import matplotlib.pyplot as plt
import cv2
def show_mask(mask, ax, random_color=False):
    """Overlay a (h, w) binary mask on a matplotlib axis as a translucent RGBA image.

    :param mask: array whose last two dims are (h, w); nonzero = foreground
    :param ax: matplotlib axis to draw on
    :param random_color: use a random RGB with 0.6 alpha instead of the default blue
    """
    if random_color:
        rgba = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
    else:
        # default: dodger blue (30, 144, 255) at 0.6 alpha
        rgba = np.array([30/255, 144/255, 255/255, 0.6])
    height, width = mask.shape[-2:]
    overlay = mask.reshape(height, width, 1) * rgba.reshape(1, 1, -1)
    ax.imshow(overlay)
    
def show_points(coords, labels, ax, marker_size=375):
    """Scatter SAM prompt points on an axis: green stars for foreground
    (label == 1), red stars for background (label == 0).

    :param coords: (n, 2) array of (x, y) points
    :param labels: (n,) array of 0/1 labels
    :param ax: matplotlib axis to draw on
    :param marker_size: scatter marker area
    """
    foreground = coords[labels == 1]
    background = coords[labels == 0]
    common = dict(marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
    ax.scatter(foreground[:, 0], foreground[:, 1], color='green', **common)
    ax.scatter(background[:, 0], background[:, 1], color='red', **common)
    
def show_box(box, ax):
    """Draw an (x1, y1, x2, y2) box on a matplotlib axis as a green outline."""
    x0, y0 = box[0], box[1]
    width = box[2] - box[0]
    height = box[3] - box[1]
    rect = plt.Rectangle((x0, y0), width, height,
                         edgecolor='green', facecolor=(0, 0, 0, 0), lw=2)
    ax.add_patch(rect)


def load_sam2():
    """Build and return a SAM2 image predictor from the local hiera-large checkpoint.

    Uses explicit build_sam2(config, checkpoint) construction rather than
    SAM2ImagePredictor.from_pretrained, so it works fully offline against
    the NAS-mounted weights.
    """
    from sam2.build_sam import build_sam2
    from sam2.sam2_image_predictor import SAM2ImagePredictor

    ckpt_path = "/mnt/nas/shengjie/huggingface_model_local/sam2-hiera-large/sam2_hiera_large.pt"
    cfg_name = "configs/sam2/sam2_hiera_l.yaml"
    sam2_model = build_sam2(cfg_name, ckpt_path)
    return SAM2ImagePredictor(sam2_model)


def test_sam(predictor, img, x, y):
    """Segment `img` with a single foreground click at (x, y).

    Runs SAM prediction with one positive point plus a dummy background
    point at the origin, keeps the highest-scoring candidate, writes it to
    tmp.jpg and returns it as a uint8 (0/255) array.

    :param predictor: a SAM/SAM2 image predictor (set_image/predict API)
    :param img: RGB image array
    :param x, y: pixel coordinates of the foreground click
    :return: best mask as a uint8 array with values 0 or 255
    """
    predictor.set_image(img)

    # One positive click at (x, y); (0, 0) is used as a negative point.
    prompt_points = np.array([[x, y], [0, 0]])
    prompt_labels = np.array([1, 0])  # 1 = foreground, 0 = background

    masks, scores, _ = predictor.predict(
        point_coords=prompt_points,
        point_labels=prompt_labels,
        multimask_output=True,  # request several candidate masks
    )

    best = np.argmax(scores)
    mask = masks[best].astype(np.uint8) * 255
    Image.fromarray(mask).save('tmp.jpg')
    return mask

def sam_process_img_1024(imgpath,
                     img_pil=None,
                     x=None,y=None,
                     target_shape = (1024,1024),
                     pad_color=(255,255,255)):
    """Pad an image toward the target aspect, resize it to `target_shape`,
    and map an optional click coordinate (x, y) into the resized image.

    :param imgpath: path/URL of the image; ignored when `img_pil` is given
    :param img_pil: optional pre-loaded PIL image
    :param x, y: optional click coordinate in the source image; returned
                 unchanged (as None) when not supplied
    :param target_shape: (width, height) of the output image
    :param pad_color: RGB fill colour used for the padding
    :return: (resized PIL image, mapped x, mapped y)
    """
    source = load_image(imgpath) if img_pil is None else img_pil  # PIL image
    image, left, top, right, bottom = pad_image(source.convert('RGB'),
                              height=target_shape[1],
                              width=target_shape[0],
                              fill=pad_color)
    image_resized = image.resize(target_shape)

    # Map the click into the padded+resized frame. The original code
    # dereferenced `img_pil` directly, which raised AttributeError whenever
    # the image was loaded from `imgpath` (img_pil is None), and crashed on
    # the default x=y=None. Both cases are fixed here.
    if x is not None and y is not None:
        if left: x += left
        if top: y += top
        # NOTE(review): scaling by the pre-pad source size reproduces the
        # original behavior; if pad_image grows the canvas, the correct
        # denominator may be the *padded* image size — confirm against
        # util_flux.pad_image.
        x *= image_resized.width / source.width
        y *= image_resized.height / source.height
    return image_resized, x, y


def erode_to_target_area(mask, target_ratio, max_iter=1000, kernel_size=3):
    """Shrink a binary mask's foreground via iterative morphological erosion
    until its area drops to roughly `target_ratio` of the original.

    :param mask: input binary mask (0/255)
    :param target_ratio: target area ratio in (0, 1]
    :param max_iter: maximum number of erosion iterations
    :param kernel_size: elliptical erosion kernel size (3 or 5 recommended)
    :return: eroded mask (never erodes past an all-zero result)
    """
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
    current_mask = mask.copy()
    original_area = int(np.sum(current_mask > 0))

    # Guard: an empty mask cannot be eroded, and the final-ratio computation
    # below would divide by zero.
    if original_area == 0:
        return current_mask

    target_area = int(original_area * target_ratio)

    # Erode step by step, stopping once the target area is reached or the
    # mask would vanish entirely. `iterations` is pre-initialised so the
    # report below stays defined even when max_iter <= 0 (the original's
    # loop variable `i` raised NameError in that case).
    iterations = 0
    for iterations in range(max_iter):
        eroded = cv2.erode(current_mask, kernel)
        current_area = np.sum(eroded > 0)

        # Stop before committing a step that undershoots or empties the mask.
        if current_area <= target_area or current_area == 0:
            break

        current_mask = eroded

    # Report how close we got to the requested ratio.
    final_ratio = np.sum(current_mask > 0) / original_area
    print(f"目标比例: {target_ratio:.0%} → 实际比例: {final_ratio:.1%} (迭代次数: {iterations})")

    return current_mask


def test_gradio_click():
    """Gradio demo: upload an image, click anywhere on it, and the output
    pane shows the background-removal mask for the whole image.

    NOTE(review): `load_rmbg` and `test_rmbg` are not defined or imported in
    this module (only `import util_for_huggingface` exists at the top), so
    calling this function raises NameError as written. Confirm the intended
    import (e.g. `from util_for_huggingface import load_rmbg, test_rmbg`).
    """
    import cv2
    import numpy as np
    import gradio as gr

    # predictor = load_sam2()
    model = load_rmbg()  # NOTE(review): undefined name in this module — see docstring

    def draw_marker(img, evt: gr.SelectData):
        # Click coordinates inside the displayed image; currently unused —
        # the whole image is sent to the background-removal model.
        x, y = evt.index
        # print(x,y)

        # img = process_img_1024( '' , img_pil=img )
        # mask = get_mask_by_rmbg( model , img )
        mask = test_rmbg( model , img )  # NOTE(review): undefined name in this module

        return mask

    with gr.Blocks() as demo:
        img_input = gr.Image(type="pil",height=512)
        img_output = gr.Image(type="pil",height=512)
        img_input.select(draw_marker, inputs=[img_input], outputs=img_output)
    demo.launch(server_name='0.0.0.0',server_port=20015)


def test_rectangle_sam2():
    """Interactive console loop for box-prompted SAM2 segmentation.

    Repeats until the user enters 'q':
      1. read an image path (a 4096x1024 horizontal strip of four tiles is
         expected; other sizes only trigger a warning)
      2. read a tile id (0-3) and crop the corresponding quarter of the strip
      3. read box coordinates x1,y1,x2,y2 and save a preview with the box
         drawn over the image to tmp_sam.jpg
      4. 'n' feeds the box to SAM2 and overwrites tmp_sam.jpg with the
         segmentation overlay; 'a' re-enters the coordinates; 'q' quits
    """
    predictor = load_sam2()
    while True:
        img_path = input("请输入图片路径 (输入 q 退出): ").strip()
        if img_path.lower() == 'q':
            print("退出。")
            break
        if not os.path.exists(img_path):
            print("图片路径不存在，请重新输入。")
            continue
        image = cv2.imread(img_path)
        if image is None:
            print("无法读取图片，请检查路径。")
            continue

        # The input is assumed to be a 4096x1024 strip of four 1024x1024
        # tiles; ask the user which tile (0-3, left to right) to process.
        h, w, _ = image.shape
        if w != 4096 or h != 1024:
            print(f"警告：图片尺寸为 {w}x{h}，不是预期的4096x1024。")
        while True:
            img_id = input("请输入要处理的图片id (0-3，对应从左到右的第几张，输入 q 退出): ").strip()
            if img_id.lower() == 'q':
                print("退出。")
                return
            try:
                img_id = int(img_id)
                if img_id < 0 or img_id > 3:
                    print("图片id应为0,1,2,3，请重新输入。")
                    continue
            except Exception:
                print("图片id格式错误，请重新输入。")
                continue
            break
        # Crop the selected quarter out of the strip.
        img_w = w // 4
        x_start = img_id * img_w
        x_end = (img_id + 1) * img_w
        image = image[:, x_start:x_end, :]

        # cv2 loads BGR; convert to RGB before drawing / feeding the predictor.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        while True:
            coords = input("请输入坐标 x1,y1,x2,y2 (用逗号分隔，输入 q 退出): ").strip()
            if coords.lower() == 'q':
                print("退出。")
                return
            try:
                x1, y1, x2, y2 = map(int, coords.split(','))
                input_box = np.array([x1, y1, x2, y2])
            except Exception as e:
                print("坐标格式错误，请重新输入。")
                continue

            # Draw the box on a copy of the image and save a preview so the
            # user can verify the selection before running segmentation.
            image_box = image.copy()
            cv2.rectangle(
                image_box,
                (input_box[0], input_box[1]),
                (input_box[2], input_box[3]),
                color=(0, 255, 0),  # green
                thickness=3
            )
            image_box_bgr = cv2.cvtColor(image_box, cv2.COLOR_RGB2BGR)
            cv2.imwrite('tmp_sam.jpg', image_box_bgr)
            print("已保存框选图到 tmp_sam.jpg")

            next_action = input("输入 n 进行分割，输入 a 重新输入坐标，输入 q 退出: ").strip().lower()
            if next_action == 'q':
                print("退出。")
                return
            elif next_action == 'a':
                continue
            elif next_action == 'n':
                # Run SAM2 with the box prompt (single mask output).
                predictor.set_image(image)
                masks, _, _ = predictor.predict(
                    point_coords=None,
                    point_labels=None,
                    box=input_box[None, :],
                    multimask_output=False,
                )
                mask = masks[0]  # NOTE(review): unused — the overlay below reads masks[0] directly

                plt.figure(figsize=(10, 10))
                plt.imshow(image)
                show_mask(masks[0], plt.gca())
                show_box(input_box, plt.gca())
                plt.axis('off')
                plt.savefig('tmp_sam.jpg')
                plt.close()
                print("已保存分割结果到 tmp_sam.jpg")
                break
            else:
                print("无效输入，请重新选择。")


def get_mask_by_img_coor(predictor,img_pil):
    """Interactively pick a box prompt on `img_pil` and return the SAM mask.

    Console loop: read box coordinates x1,y1,x2,y2, save a preview with the
    box drawn over the image to tmp_sam.jpg; then 'n' runs the predictor
    with the box and returns the mask, 'a' re-enters coordinates, 'q'
    returns None.

    NOTE(review): `np.array(img_pil)` never returns None, so the None-check
    branch below is unreachable — it looks copied from a path-based loader
    (cf. test_rectangle_sam2); the outer `while True` only repeats through
    that dead branch. Confirm whether re-loading was intended.
    """
    while True:
        image = np.array(img_pil)
        if image is None:
            print("无法读取图片，请检查路径。")
            continue

        # img_pil is assumed to already be RGB, so no BGR->RGB conversion here.
        # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        while True:
            coords = input("请输入坐标 x1,y1,x2,y2 (用逗号分隔，输入 q 退出): ").strip()
            if coords.lower() == 'q':
                print("退出。")
                return
            try:
                x1, y1, x2, y2 = map(int, coords.split(','))
                input_box = np.array([x1, y1, x2, y2])
            except Exception as e:
                print("坐标格式错误，请重新输入。")
                continue

            # Draw the box on a copy of the image and save a preview so the
            # user can verify the selection before running segmentation.
            image_box = image.copy()
            cv2.rectangle(
                image_box,
                (input_box[0], input_box[1]),
                (input_box[2], input_box[3]),
                color=(0, 255, 0),  # green
                thickness=3
            )
            image_box_bgr = cv2.cvtColor(image_box, cv2.COLOR_RGB2BGR)
            cv2.imwrite('tmp_sam.jpg', image_box_bgr)
            print("已保存框选图到 tmp_sam.jpg")

            next_action = input("输入 n 进行分割，输入 a 重新输入坐标，输入 q 退出: ").strip().lower()
            if next_action == 'q':
                print("退出。")
                return
            elif next_action == 'a':
                continue
            elif next_action == 'n':
                # Run the predictor with the box prompt (single mask output).
                predictor.set_image(image)
                masks, _, _ = predictor.predict(
                    point_coords=None,
                    point_labels=None,
                    box=input_box[None, :],
                    multimask_output=False,
                )
                mask = masks[0]
                return mask
            else:
                print("无效输入，请重新选择。")

def get_mask_by_img_coor_inputbox(predictor, img_pil, x1, y1, x2, y2):
    """Segment `img_pil` with a single (x1, y1, x2, y2) box prompt.

    Non-interactive counterpart of get_mask_by_img_coor: runs the predictor
    once with the given box and returns the resulting mask, or None when the
    image / coordinates are unusable.
    """
    rgb = np.array(img_pil.convert('RGB'))
    if rgb is None:  # defensive guard kept from the original implementation
        print("无法读取图片，请检查路径。")
        return None

    try:
        box = np.array([x1, y1, x2, y2])
    except Exception:
        print("坐标格式错误，请重新输入。")
        return None

    # Run the predictor with the box prompt (single mask output).
    predictor.set_image(rgb)
    masks, _, _ = predictor.predict(
        point_coords=None,
        point_labels=None,
        box=box[None, :],
        multimask_output=False,
    )
    return masks[0]


if __name__=='__main__':
    # Launch the interactive console demo (box-prompted SAM2 segmentation).
    # test_gradio_click()
    test_rectangle_sam2()