import array
import os
from functools import cmp_to_key
from pathlib import Path
import time
import json
import cv2
import numpy as np
import matplotlib.pyplot as plt
import requests
from PIL import Image
from tqdm import tqdm
import torch
from transformers import Owlv2Processor, Owlv2ForObjectDetection
from segment_anything import sam_model_registry, SamPredictor
import scannet_200_classes
from utils import calculate_cover
from common import get_color

def results_cmp(res1, res2):
    """cmp-style comparator for detection records (used via functools.cmp_to_key).

    Orders ascending by "label"; within the same label, descending by
    "score" so the most confident detection of a class comes first.
    """
    label_delta = res1["label"] - res2["label"]
    if label_delta:
        return label_delta
    return res2["score"] - res1["score"]

class ObjectDetectAndSegment(object):
    """Open-vocabulary object detection (OWLv2) plus instance segmentation (SAM).

    Detects objects from the ScanNet-200 label set in RGB images via text
    prompts, then feeds the detected boxes to SAM to produce per-instance
    masks merged into a single id-labeled grayscale mask image.
    """

    def __init__(self, label_csv_file):
        # Prefer the first CUDA device when available; models and tensors
        # are moved to this device in load_model()/process_one().
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

        self._classes = scannet_200_classes.get_class_labels(label_csv_file)
        self._id_to_name = {i: x.name for (i, x) in enumerate(self._classes)}
        def process_text(x: str) -> str:
            # Normalize class names into plain lower-case words for the text prompts.
            return x.replace("-", " ").replace("_", " ").lstrip().rstrip().lower()
        
        prebuilt_class_names = [
            process_text(x)
            for x in self._id_to_name.values()
        ]
        
        # A "a photo of ..." prompt prefix was found to improve OWL-ViT's performance.
        self._all_classes = ['a photo of ' + class_name for class_name in prebuilt_class_names]

        # Minimum confidence kept by detection post-processing.
        self._owl_threshold = 0.2
        # When True, intermediate results are shown in matplotlib/OpenCV windows.
        self._visible = False

    def load_model(self, model_path):
        """Load the OWLv2 detector and the SAM ViT-B segmenter from *model_path*."""
        owlvit_path = os.path.join(model_path, "owlvit2")
        sam_path = os.path.join(model_path, "SAM/sam_vit_b_01ec64.pth")
        print('owlvit_path: {}\n, sam_path: {}'.format(owlvit_path, sam_path))
        
        # self.processor = OwlViTProcessor.from_pretrained(owlvit_path)
        # self.model = OwlViTForObjectDetection.from_pretrained(owlvit_path)
        self.processor = Owlv2Processor.from_pretrained(owlvit_path)
        self.model = Owlv2ForObjectDetection.from_pretrained(owlvit_path).to(self.device)
        print('load owlvit2 model success')

        model_type = "vit_b"
        sam = sam_model_registry[model_type](checkpoint=sam_path)
        self.mask_predictor = SamPredictor(sam)
        self.mask_predictor.model = self.mask_predictor.model.to(self.device)
        print('load sam model success')
        print('模型加载成功')

    def process_all_color_images(self, folder_path):
        """
        Iterate over every image ending in 'color.png' under *folder_path*,
        run `process_one` on each, and write the results back to the folder
        as '<id>_labels.json' and '<id>_predicted.png'.

        Args:
            folder_path (str): directory searched for 'color.png' images.
        """

        if not os.path.exists(folder_path):
            print("文件夹不存在: {}".format(folder_path))
            return

        # Collect all '*color.png' files via pathlib; sorted() gives a
        # deterministic processing order.
        image_paths = sorted(Path(folder_path).glob('*color.png'))

        index = 0  # NOTE(review): unused variable
        for image_path in tqdm(image_paths, desc="detect and segment"):
            
            # Read the image (OpenCV loads BGR).
            # NOTE(review): image_path is a pathlib.Path — older OpenCV builds
            # require str(image_path) here; confirm against the installed cv2.
            raw_bgr = cv2.imread(image_path)

            raw_rgb = raw_bgr[:, :, ::-1]  # BGR -> RGB
            label_info, combined_mask = self.process_one(raw_rgb)
            if self._visible:
                self.show_mask_and_label(image=raw_bgr, combined_mask=combined_mask, label_info=label_info, cv_show_time=1)
            
            # Persist instance metadata as '<frame id>_labels.json'.
            image_name = image_path.name
            id = image_name.split('_')[0]  # frame id prefix, e.g. '0001' from '0001_color.png'
            label_json_file = os.path.join(folder_path, id + "_labels.json")
            with open(label_json_file, 'w') as f:
                json.dump(label_info, f, indent=4)
            
            # Persist the merged instance-id mask as '<frame id>_predicted.png'.
            mask_file = os.path.join(folder_path, id + "_predicted.png")
            cv2.imwrite(mask_file, combined_mask)

    def process_one(self, raw_rgb, show_detect_box=False):
        '''
        Detect + segment a single image.

        Outputs JSON-style instance label info (label_info) plus all masks
        merged into one 8-bit unsigned grayscale image, where each pixel
        value is the corresponding "id" from label_info (0 = background).
        return label_info -> list, mask -> 2-D np.array matching the image size
        '''
        rgb = torch.Tensor(raw_rgb.copy()).to(self.device)

        # --- Object detection ---
        target_sizes = torch.Tensor([rgb.size()])  # (h, w, channels) of the input
        inputs_data = self.processor(text=self._all_classes, images=rgb, return_tensors="pt")
        # inputs has three elements: input_ids, attention_mask, pixel_values
        for input_data in inputs_data:
            inputs_data[input_data] = inputs_data[input_data].to(self.device)
        with torch.no_grad():
            outputs = self.model(**inputs_data)
        # outputs has seven elements: logits, pred_boxes, text_embeds,
        # image_embeds, class_embeds, text_model_output, vision_model_output
        results = self.post_process_object_detection(outputs=outputs, target_sizes=target_sizes, threshold=self._owl_threshold)
        
        if len(results) == 0:
            print('no object detected')
            return [], np.zeros_like(raw_rgb[:, :, 0], dtype=np.uint8)
        
        i = 0  # single-image batch: only results[0] is used
        boxes, scores, labels, features = self.filter_not_valid_detection(results[i]["boxes"], 
                                                                          results[i]["scores"], 
                                                                          results[i]["labels"], 
                                                                          results[i]['class_embed'])

        if len(boxes) == 0:
            print('no object detected remained after filter')
            return [], np.zeros_like(raw_rgb[:, :, 0], dtype=np.uint8)
        
        if self._visible and show_detect_box:
            plt.figure(figsize=(10, 10))
            plt.imshow(raw_rgb)
            ax = plt.gca()

            for i in range(len(boxes)):
                box = boxes[i].cpu().numpy()
                score = scores[i].cpu().numpy()
                label = labels[i].cpu().numpy()

                # Draw the bounding box
                x0, y0, x1, y1 = box
                width, height = x1 - x0, y1 - y0
                rect = plt.Rectangle((x0, y0), width, height, fill=False, color='red', linewidth=2)
                ax.add_patch(rect)

                # Show the class label and confidence
                class_name = self._classes[label].name  # look up the class name
                ax.text(x0, y0, f'{class_name}: {score:.2f}', 
                        bbox=dict(facecolor='yellow', alpha=0.5), fontsize=12, color='black')
            plt.axis('off')
            plt.show()

        # Collect the detection results for the JSON label file
        label_info = []
        for i in range(len(boxes)):
            box = boxes[i].cpu().numpy()
            score = scores[i].cpu().numpy()
            label = labels[i].cpu().numpy()
            is_thing = self._classes[label].is_thing

            x0, y0, x1, y1 = box
            width = x1 - x0
            height = y1 - y0
            area = width * height

            # Class label and confidence
            class_name = self._classes[label].name  # look up the class name
            # print("label: {}, class_name: {}, score: {}".format(label, class_name, score))
            label_info.append({
                # all instance ids start at 1 (0 is reserved for background pixels)
                "id": int(i + 1),
                "isthing": is_thing,
                "category_id": int(label),
                "area": int(area)
            })

        # --- Segmentation ---
        # Now run SAM to compute segmentation mask
        input_boxes = boxes.detach().to(self.device)  

        self.mask_predictor.set_image(rgb.cpu().numpy())   
        # self.mask_predictor.set_image(rgb)    

        if len(input_boxes) == 0:
            print('no candidated object')
            return [], np.array([])
        
        transformed_boxes = self.mask_predictor.transform.apply_boxes_torch(input_boxes.reshape(-1, 4), rgb.shape[:2])  
        masks, iou_predictions, low_res_masks = self.mask_predictor.predict_torch(
            point_coords=None,
            point_labels=None,
            boxes=transformed_boxes,
            multimask_output=False
        )
        masks = masks[:, 0, :, :]  # multimask_output=False -> keep the single mask per box
        
        # Merge all masks into one image:
        # start from an all-zero 2-D matrix shaped like a single mask.
        # NOTE(review): uint8 means instance ids above 255 would wrap — fine
        # only while detections stay well under 256 per image.
        combined_mask = np.zeros_like(masks[0, :, :].cpu().numpy(), dtype=np.uint8)
        # Score map remembering the highest score seen so far at each pixel.
        score_map = np.zeros_like(masks[0, :, :].cpu().numpy(), dtype=np.float32)

        # Paint each mask into combined_mask.
        for i in range(masks.shape[0]):
            mask = masks[i, :, :].cpu().numpy()
            label = labels[i].cpu().numpy()
            instance_id = label_info[i]["id"]
            score = scores[i].cpu().numpy()  # score of the current mask

            # Convert the boolean mask to integers (False -> 0, True -> 1)
            mask_int = mask.astype(np.uint8)

            # Per-pixel score for the current mask (bool -> float)
            current_score = mask_int * score

            # Only overwrite pixels where this mask scores higher than before,
            # so overlapping regions keep the most confident instance.
            update_mask = (current_score > score_map)
            combined_mask[update_mask] = instance_id
            score_map[update_mask] = current_score[update_mask]

        # Sanity check: every non-zero pixel in combined_mask must be a
        # known instance id from label_info.
        valid_instance_ids = set(info["id"] for info in label_info)

        # All unique pixel values present in combined_mask
        unique_pixel_values = set(np.unique(combined_mask))

        # Pixel values that are in neither valid_instance_ids nor background (0)
        invalid_pixel_values = unique_pixel_values - valid_instance_ids
        invalid_pixel_values = invalid_pixel_values - {0}
        if invalid_pixel_values:
            print(f"发现无效的像素值: {invalid_pixel_values - {0}}(排除背景 0)")

        return label_info, combined_mask

    def test_One_Image(self, image_path, save_result=False):
        """Detect + segment a single image file, printing per-stage timings;
        optionally save the annotated detection image next to the input."""
        # Read the image from disk.
        # NOTE(review): cv2.imread returns BGR, so 'raw_rgb' is actually BGR
        # until the channel flip a few lines below.
        raw_rgb = cv2.imread(image_path)

        if self._visible:
            cv2.imshow("input", raw_rgb)
            cv2.waitKey(0)

        raw_rgb = raw_rgb[:, :, ::-1]  # BGR -> RGB
        rgb = torch.Tensor(raw_rgb.copy()).to(self.device)

        # --- Object detection ---
        start_time = time.time()

        target_sizes = torch.Tensor([rgb.size()])
        inputs_data = self.processor(text=self._all_classes, images=rgb, return_tensors="pt")
        # inputs has three elements: input_ids, attention_mask, pixel_values
        for input_data in inputs_data:
            inputs_data[input_data] = inputs_data[input_data].to(self.device)
        with torch.no_grad():
            outputs = self.model(**inputs_data)
        # outputs has seven elements: logits, pred_boxes, text_embeds,
        # image_embeds, class_embeds, text_model_output, vision_model_output
        results = self.post_process_object_detection(outputs=outputs, target_sizes=target_sizes, threshold=self._owl_threshold)
        if len(results) == 0:
            print('no object detected')
            return
        i = 0  # single-image batch: only results[0] is used
        boxes, scores, labels, features = self.filter_not_valid_detection(results[i]["boxes"], results[i]["scores"], results[i]["labels"], results[i]['class_embed'])
        
        end_time = time.time()
        print('object detection time: {} s'.format(end_time - start_time))

        if len(boxes) == 0:
            print('no object detected remained after filter')
            return

        # Optionally save the annotated detection image
        if save_result:
            annotated_image = self.draw_boxes_on_image(raw_rgb, boxes, scores, labels)
            save_path = image_path + "output_detection.png"
            cv2.imwrite(save_path, annotated_image)

        if self._visible:
            plt.figure(figsize=(10, 10))
            plt.imshow(raw_rgb)
            ax = plt.gca()

            for i in range(len(boxes)):
                box = boxes[i].cpu().numpy()
                score = scores[i].cpu().numpy()
                label = labels[i].cpu().numpy()

                # Draw the bounding box
                x0, y0, x1, y1 = box
                width, height = x1 - x0, y1 - y0
                rect = plt.Rectangle((x0, y0), width, height, fill=False, color='red', linewidth=2)
                ax.add_patch(rect)

                # Show the class label and confidence
                class_name = self._classes[label].name  # look up the class name
                ax.text(x0, y0, f'{class_name}: {score:.2f}', 
                        bbox=dict(facecolor='yellow', alpha=0.5), fontsize=12, color='black')
            plt.axis('off')
            plt.show()


        # --- Segmentation ---
        # Now run SAM to compute segmentation mask
        start_time = time.time()
        input_boxes = boxes.detach().to(self.device)  

        self.mask_predictor.set_image(rgb.cpu().numpy())   
        # self.mask_predictor.set_image(rgb)    

        if len(input_boxes) == 0:
            print('no candidated object')
            return
        
        transformed_boxes = self.mask_predictor.transform.apply_boxes_torch(input_boxes.reshape(-1, 4), rgb.shape[:2])  
        masks, iou_predictions, low_res_masks = self.mask_predictor.predict_torch(
            point_coords=None,
            point_labels=None,
            boxes=transformed_boxes,
            multimask_output=False
        )
        masks = masks[:, 0, :, :]  # multimask_output=False -> keep the single mask per box
        end_time = time.time()
        print('segmentation time: {} s'.format(end_time - start_time))
        
        # Show the segmentation result
        if self._visible:
            plt.imshow(raw_rgb)
            for i in range(masks.shape[0]):
                self.show_mask(masks[i].cpu().numpy(), plt.gca())
                self.show_box(boxes[i].cpu().numpy(), plt.gca(), self._classes[(labels[i].cpu()).numpy()].name)
                # show_points(input_points[i],input_labels[i], plt.gca())
                
            plt.axis('off')
            plt.show()

    def draw_boxes_on_image(self, raw_rgb, boxes, scores, labels):
        """Draw labeled detection boxes on a copy of *raw_rgb*; returns a BGR image."""
        # Convert to BGR for OpenCV drawing operations
        image = cv2.cvtColor(raw_rgb, cv2.COLOR_RGB2BGR)
        
        for i in range(len(boxes)):
            box = boxes[i].cpu().numpy().astype(int)
            score = scores[i].cpu().numpy()
            label = labels[i].cpu().numpy()

            x0, y0, x1, y1 = box
            class_name = self._classes[label].name

            # Box outline
            cv2.rectangle(image, (x0, y0), (x1, y1), (0, 0, 255), 2)  # red box
            
            # Text label on a filled background
            label_text = f"{class_name}: {score:.2f}"
            (text_width, text_height), _ = cv2.getTextSize(label_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
            cv2.rectangle(image, (x0, y0 - text_height - 5), (x0 + text_width, y0), (0, 255, 255), -1)  # yellow background
            cv2.putText(image, label_text, (x0, y0 - 5), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 0, 0), 1, cv2.LINE_AA)  # black text

        return image

    def post_process_object_detection(self,
        outputs, threshold: float = 0.15, target_sizes = None
    ):
        """Convert raw OWLv2 outputs into per-image detections above *threshold*.

        Boxes are converted from normalized center format to absolute corner
        format, scaled on both axes by max(img_h, img_w) — presumably because
        OWLv2 pads inputs to a square; confirm against the processor config.
        """
        logits, boxes, class_embeddings = outputs.logits, outputs.pred_boxes, outputs.class_embeds
        
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

        # Per-query best class: max over the text-label dimension.
        probs = torch.max(logits, dim=-1)
        scores = torch.sigmoid(probs.values)
        labels = probs.indices
        
        # Convert to [x0, y0, x1, y1] format
        boxes = self.center_to_corners_format(boxes)
        
        # target_sizes carries (h, w, channels); channel is unused for scaling.
        img_h, img_w, channel = target_sizes.squeeze(0)

        size = torch.max(img_h, img_w)
        scale_fct = torch.stack([size, size, size, size], dim=-1).to(boxes.device)

        boxes = boxes * scale_fct

        results = []
        for s, l, b, c in zip(scores, labels, boxes, class_embeddings):
            score = s[s > threshold]
            label = l[s > threshold]
            box = b[s > threshold]
            class_embed = c[s > threshold]
            results.append({"scores": score, "labels": label, "boxes": box, "class_embed": class_embed})

        return results

    def center_to_corners_format(self, bboxes_center):
        """Convert boxes from (cx, cy, w, h) to (x0, y0, x1, y1) format."""
        center_x, center_y, width, height = bboxes_center.unbind(-1)
        bbox_corners = torch.stack(
            # top left x, top left y, bottom right x, bottom right y
            [(center_x - 0.5 * width), (center_y - 0.5 * height), (center_x + 0.5 * width), (center_y + 0.5 * height)],
            dim=-1,
        )
        return bbox_corners
    
    def show_mask(self, mask, ax, random_color=True):
        """Overlay a single binary mask on matplotlib axes *ax* as a colored RGBA layer."""
        if random_color:
            color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
        else:
            color = np.array([30/255, 144/255, 255/255, 0.6])
        h, w = mask.shape[-2:]
        mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
        ax.imshow(mask_image)

    def show_box(self, box, ax, label=None):
        """Draw an (x0, y0, x1, y1) box on *ax*, optionally with a text label."""
        x0, y0 = box[0], box[1]
        w, h = box[2] - box[0], box[3] - box[1]
        ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2))    
        if label:
            plt.text(x0, y0, label)
    
    def show_mask_and_label(self, image, combined_mask, label_info, cv_show_time=0.0):
        """
        Display the image with masks overlaid and each instance's label drawn
        at the centroid of its mask.

        Args:
            image (np.array): original image, shape (H, W, 3).
            combined_mask (np.array): merged 2-D mask matrix, shape (H, W).
            label_info (list): instance-segmentation label records.
            cv_show_time (float): seconds to display; 0 waits for a key press.
        """
        # Build a colored mask image.
        mask_image = np.zeros_like(image)
        unique_instance_ids = np.unique(combined_mask)

        # Boolean mask covering every valid instance region (id == 0 excluded).
        valid_mask = np.zeros_like(combined_mask, dtype=bool)

        # Give each instance its own color and accumulate the valid region.
        has_mask = False
        for id in unique_instance_ids:
            if id == 0:
                continue  # skip the background
            
            has_mask = True
            color = get_color(id)
            current_mask = (combined_mask == id)
            mask_image[current_mask] = color
            valid_mask |= current_mask  # union of all valid masks

        # Blend the mask colors onto the image only where a mask exists.
        overlayed_image = image.copy()
        alpha = 0.3
        if has_mask:
            overlayed_image[valid_mask] = cv2.addWeighted(image[valid_mask], 1, mask_image[valid_mask], alpha, 0)

        # Draw each instance's class label at the centroid of its mask.
        for info in label_info:
            instance_id = info['id']
            category_id = info['category_id']
            class_name = self._classes[category_id].name  # class name for this category id

            # Pixel coordinates belonging to this instance
            coords = np.column_stack(np.where(combined_mask == instance_id))
            if len(coords) == 0:
                continue

            # Mask centroid
            y_center, x_center = coords.mean(axis=0).astype(int)

            # Render the label text on the overlay
            cv2.putText(
                overlayed_image,
                f'{class_name}: {instance_id}',
                (x_center, y_center),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.3,
                (255, 255, 255),
                1,
                cv2.LINE_AA
            )

        # Show original, overlay, and raw mask views.
        cv2.imshow("Image", image)
        cv2.imshow("Mask and Label", overlayed_image)
        cv2.imshow("mask", mask_image)
        if cv_show_time > 0:
            cv2.waitKey(int(cv_show_time * 1000))
        else:
            cv2.waitKey(0)

    def filter_not_valid_detection(self, boxes, scores, labels, features):
        """Drop tiny boxes, then suppress same-class boxes that overlap a
        higher-scoring one; returns filtered (boxes, scores, labels, features)."""
        # Sort results by class first, then by confidence high-to-low.
        sorted_results = []

        min_box_area = 20 * 20  # discard boxes smaller than 20x20 pixels
        def box_area(box):
            # Area of an [x0, y0, x1, y1] box.
            x0, y0, x1, y1 = box.cpu().numpy()
            width = x1 - x0
            height = y1 - y0
            area = width * height
            return area
        
        for box, score, label, feature in zip(boxes, scores, labels, features):
            # Filter out boxes that are too small.
            if box_area(box) < min_box_area:
                continue
            sorted_results.append({'box' : box, 'score' : score, 'label' : label, 'feature' : feature})
        sorted_results = sorted(sorted_results, key=cmp_to_key(results_cmp))
        # Results that survive the overlap suppression below.
        filtered_results = []

        # Among heavily-overlapping boxes of the same class, keep only the
        # highest-scoring one.
        if len(sorted_results) < 2:
            filtered_results = sorted_results
        else :
            # indices of results already suppressed
            ignore_ids = set()
            # NOTE(review): the loop stops at len-2, so the last sorted result
            # is never appended even when it was not suppressed — confirm
            # whether the final detection should also be kept.
            for i in range(len(sorted_results) - 1):
                if i in ignore_ids:
                    continue
                filtered_results.append(sorted_results[i])
                    
                res1 = sorted_results[i]
                j = i + 1
                
                while j < len(sorted_results) :
                    res2 = sorted_results[j]
                    if res1['label'] != res2['label']:
                        break  # sorted by label, so no later j can match
                    # NOTE(review): calculate_cover's contract lives in utils —
                    # here a value > 0.6 or < 0 marks res2 as redundant.
                    cover_rate = calculate_cover(res1['box'], res2['box'], res1['score'], res2['score'])
                    if cover_rate > 0.6 or cover_rate < 0:
                        ignore_ids.add(j)
                    j = j + 1
        if len(filtered_results) == 0 :
            return [], [], [], []
        boxes = torch.cat([res['box'] for res in filtered_results], dim=0).reshape(-1, 4)
        scores = [res['score'] for res in filtered_results]
        labels = [res['label'].cpu() for res in filtered_results]
        features = [res['feature'] for res in filtered_results]

        return boxes, scores, labels, features
        

if __name__ == '__main__':
    # Resolve the class-label CSV relative to this script's location.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    class_label_file = os.path.join(script_dir, '..', 'config', 'class_label.csv')

    # Build the pipeline, load both models, and process the capture folder.
    detector = ObjectDetectAndSegment(class_label_file)
    detector.load_model(model_path="/home/zxw/models")
    # detector.test_One_Image(image_path="/home/zxw/dataset/semantic/self_collect/bottle.jpg", save_result=True)
    detector.process_all_color_images("/home/zxw/dataset/semantic/self_collect/change_test/realsense_orb_slam_for_multi_2025-05-30-17-28-25")