import os
import torch
import numpy as np
from PIL import Image, ImageOps
from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation
import torchvision.ops as ops  # 用于计算bbox


import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import matplotlib.patches as patches

# def mask2bbox(masks):
#     """Obtain tight bounding boxes of binary masks.

#     Args:
#         masks (Tensor): Binary mask of shape (n, h, w).

#     Returns:
#         Tensor: Bboxe with shape (n, 4) of \
#             positive region in binary mask.
#     """
#     N = masks.shape[0]
#     bboxes = masks.new_zeros((N, 4), dtype=torch.float32)
#     x_any = torch.any(masks, dim=1)
#     y_any = torch.any(masks, dim=2)
#     for i in range(N):
#         x = torch.where(x_any[i, :])[0]
#         y = torch.where(y_any[i, :])[0]
#         if len(x) > 0 and len(y) > 0:
#             bboxes[i, :] = bboxes.new_tensor(
#                 [x[0], y[0], x[-1] + 1, y[-1] + 1])

#     return bboxes


def mask2bbox(masks, min_size=2):
    """Obtain tight bounding boxes of binary masks, dropping degenerate boxes.

    Args:
        masks (Tensor): Binary mask of shape (n, h, w).
        min_size (int): Minimum box side length; boxes whose width or height
            is smaller than this are treated as invalid.

    Returns:
        Tensor: Bboxes of shape (n, 4) in (x1, y1, x2, y2) half-open pixel
            coordinates; rows for empty or too-small masks are all zeros.
    """
    N = masks.shape[0]
    bboxes = masks.new_zeros((N, 4), dtype=torch.float32)
    # Reduce over rows/columns to find which columns (x) / rows (y) are occupied.
    x_any = torch.any(masks, dim=1)  # (n, w): True where a column has any mask
    y_any = torch.any(masks, dim=2)  # (n, h): True where a row has any mask

    for i in range(N):
        x = torch.where(x_any[i, :])[0]
        y = torch.where(y_any[i, :])[0]
        if len(x) > 0 and len(y) > 0:
            x1, x2 = x[0].item(), x[-1].item() + 1
            y1, y2 = y[0].item(), y[-1].item() + 1
            # Keep only boxes of at least min_size x min_size. Invalid rows keep
            # their zero initialization, so no explicit zero re-assignment is
            # needed (the original else-branch rewrote an already-zero row).
            if x2 - x1 >= min_size and y2 - y1 >= min_size:
                bboxes[i, :] = bboxes.new_tensor([x1, y1, x2, y2])
    return bboxes


def compute_iou(box_a, box_b):
    """Return the IoU of two boxes given as (x1, y1, x2, y2) sequences.

    Areas use the inclusive (+1 per side) convention of the original GenEval
    code; a box with x2 < x1 or y2 < y1 has zero area. Returns 0 when the
    union is empty.
    """
    def _area(x1, y1, x2, y2):
        return max(x2 - x1 + 1, 0) * max(y2 - y1 + 1, 0)

    # Intersection rectangle: max of the mins, min of the maxes.
    ix1 = max(box_a[0], box_b[0])
    iy1 = max(box_a[1], box_b[1])
    ix2 = min(box_a[2], box_b[2])
    iy2 = min(box_a[3], box_b[3])

    inter = _area(ix1, iy1, ix2, iy2)
    union = _area(box_a[0], box_a[1], box_a[2], box_a[3]) \
        + _area(box_b[0], box_b[1], box_b[2], box_b[3]) - inter
    return inter / union if union else 0

# --- Detection post-processing hyper-parameters ---
THRESHOLD = 0.3            # confidence threshold for non-"counting" prompts
COUNTING_THRESHOLD = 0.9   # stricter threshold used when metadata tag == "counting"
MAX_OBJECTS = 16           # cap on detections kept per class
NMS_THRESHOLD = 1.0        # IoU threshold for NMS; exactly 1.0 disables NMS
POSITION_THRESHOLD = 0.1   # deadzone factor for relative-position checks

# NOTE(review): hard-coded absolute path — consider making this configurable.
obj_names_PATH = "/data01/lyl/step4_1024_multi_nodes/step4_gen_0707/DiffusionNFT/flow_grpo/gen_eval/object_names.txt"

# Class names recognized by the detector, one per line in the file.
classnames = []
with open(obj_names_PATH) as cls_file:
    classnames = [line.strip() for line in cls_file]

class Mask2FormerObjectDetector:
    """Instance detector built on a HuggingFace Mask2Former checkpoint.

    Calling the instance on a batch of images returns, per image, a dict
    mapping class name -> [(ndarray[x1, y1, x2, y2, score], mask ndarray), ...]
    suitable for the GenEval-style evaluation functions below.
    """

    def __init__(self, model_path="./mask2former", device="cuda", score_thresh=0.5):
        # NOTE(review): score_thresh is currently unused — confidence filtering
        # happens in __call__ via THRESHOLD / COUNTING_THRESHOLD instead.
        self.device = device
        self.processor = AutoImageProcessor.from_pretrained(model_path)
        self.model = Mask2FormerForUniversalSegmentation.from_pretrained(model_path).to(device)
        self.model.config.task_type = "instance"
        self.model.eval()

    def __call__(self, image_pils, metadatas):
        """
        Args:
            image_pils: List[PIL.Image]
            metadatas:  List[Dict]; each dict's 'tag' selects the confidence
                threshold ("counting" uses the stricter COUNTING_THRESHOLD).

        Returns:
            List[Dict]: per-image detections; each dict maps classname to a
            score-sorted list of (bbox-with-score, binary mask) pairs.
        """
        inputs = self.processor(images=image_pils, return_tensors="pt").to(self.device)
        with torch.no_grad():
            outputs = self.model(**inputs)

        # Mask2Former emits per-query class logits and mask logits.
        class_logits = outputs.class_queries_logits  # [batch, num_queries, num_classes+1]
        mask_logits = outputs.masks_queries_logits   # [batch, num_queries, h, w]

        target_sizes = [image.size[::-1] for image in image_pils]  # (height, width)

        detecteds = []
        batch_size = len(image_pils)
        for batch_idx in range(batch_size):
            # Process one image at a time.
            class_logit = class_logits[batch_idx]  # [num_queries, num_classes+1]
            mask_logit = mask_logits[batch_idx]    # [num_queries, h, w]

            # Class probabilities; the last channel is the "no object" class.
            scores_all = class_logit.softmax(-1)  # [num_queries, num_classes+1]
            scores, labels = scores_all[:, :-1].max(-1)  # drop the background channel

            # NOTE(review): `scores > 0` keeps every query (softmax outputs are
            # positive); real confidence filtering happens later with
            # confidence_threshold. The empty-result branch below is thus
            # effectively unreachable.
            keep = scores > 0

            if keep.sum() == 0:
                # No detections at all — return an empty dict for this image.
                detecteds.append({}) # TODO: verify the empty-result format downstream
                continue

            scores = scores[keep] # [num_kept]
            labels = labels[keep] # [num_kept]
            masks = mask_logit[keep]  # [num_kept, h, w] = [100,96,96]

            # Upsample mask logits to the original image resolution.
            orig_size = target_sizes[batch_idx]
            masks = torch.nn.functional.interpolate(masks.unsqueeze(1), size=orig_size, mode="bilinear", align_corners=False)
            masks = masks.squeeze(1)  # [num_kept, H, W]

            # Threshold logits at 0 to obtain boolean masks.
            masks = masks > 0  # bool tensor
            # NOTE(review): .float().bool() is a redundant round-trip; masks is
            # already a bool tensor here.
            bboxes = mask2bbox(masks.float().bool()) # torch.Size([100, 4])

            # ---------------------------- post-processing -------------------------------------

            # "counting" prompts use a stricter confidence threshold.
            confidence_threshold = THRESHOLD if metadatas[batch_idx]['tag'] != "counting" else COUNTING_THRESHOLD

            # Group detections by class and post-process each class separately.
            detected = {}
            for class_idx, classname in enumerate(classnames):
                # Indices of all predictions assigned to this class.
                cls_indices = (labels == class_idx).nonzero(as_tuple=True)[0]

                if cls_indices.numel() == 0:
                    continue

                # Gather this class's bboxes, scores, and masks.
                cls_scores = scores[cls_indices]
                cls_bboxes = bboxes[cls_indices]  # shape [N, 4]
                cls_masks = masks[cls_indices]    # shape [N, H, W]
                cls_scores_np = cls_scores.cpu().numpy()
                cls_bboxes_np = cls_bboxes.cpu().numpy()
                cls_masks_np = cls_masks.cpu().numpy()

                # Sort by descending score, then drop low-confidence entries.
                ordering = np.argsort(cls_scores_np)[::-1]
                ordering = ordering[cls_scores_np[ordering] > confidence_threshold]

                # Drop invalid bboxes (all-zero rows produced by mask2bbox's
                # min-size filter). | added by xjh - 2025/8/21
                valid_mask = ~(np.all(cls_bboxes_np[ordering] == 0, axis=1))
                ordering = ordering[valid_mask]

                ordering = ordering[:MAX_OBJECTS].tolist() # cap detections per class

                # Greedy NMS over the remaining score-sorted candidates.
                selected = []
                while ordering:
                    max_idx = ordering.pop(0) # take the highest-scoring candidate
                    box_with_score = np.concatenate([cls_bboxes_np[max_idx], [cls_scores_np[max_idx]]])
                    selected.append((box_with_score, cls_masks_np[max_idx]))

                    ordering = [ # drop candidates overlapping the selected box too much
                        idx for idx in ordering
                        if NMS_THRESHOLD == 1.0 or compute_iou(cls_bboxes_np[max_idx], cls_bboxes_np[idx]) < NMS_THRESHOLD # NMS_THRESHOLD == 1.0 disables NMS
                    ]

                if selected:
                    detected[classname] = selected

            detecteds.append(detected)  # results for this image

        return detecteds

# mask_object_detector = Mask2FormerObjectDetector(model_path="/data01/lyl/step4_1024_multi_nodes/step4_gen_0707/DiffusionNFT/flow_grpo/gen_eval/mask2former")

# ----------------------------------------------------------------------------------------
# import mmdet
# from mmdet.apis import inference_detector, init_detector
# MY_CONFIG_PATH="your_mmdetection_path/mmdetection/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py"
# MY_CKPT_PATH="your_reward-server_path/reward-server/model/mask2former2"

import argparse
import json
import os
import re
import sys
import time
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
from collections import defaultdict
from PIL import Image, ImageOps
import torch
import open_clip
from clip_benchmark.metrics import zeroshot_classification as zsc
# Silence clip_benchmark's tqdm progress bars with an identity pass-through.
zsc.tqdm = lambda it, *args, **kwargs: it

DEVICE = "cuda"  # default device for the detector and CLIP models
# gpu_id = 0
# DEVICE = torch.device(f"cuda:{gpu_id}")
# os.environ["HIP_VISIBLE_DEVICES"] = str(gpu_id)
# # torch.cuda.set_device(gpu_id)
# print(f"Using device: {DEVICE}")

def load_geneval(DEVICE):
    """Build and return the GenEval reward function ``compute_geneval``.

    Loads the Mask2Former detector and an open_clip ViT-L/14 model onto
    *DEVICE*, then defines the evaluation helpers as closures over them.
    """
    def timed(fn):
        # Decorator: log fn's wall-clock runtime to stderr.
        # NOTE(review): consider functools.wraps to preserve fn's metadata.
        def wrapper(*args, **kwargs):
            startt = time.time()
            result = fn(*args, **kwargs)
            endt = time.time()
            print(f'Function {fn.__name__!r} executed in {endt - startt:.3f}s', file=sys.stderr)
            return result
        return wrapper

    # Instance-segmentation detector used by evaluate_image below.
    # NOTE(review): hard-coded checkpoint path — consider a parameter.
    mask_object_detector = Mask2FormerObjectDetector(model_path="/data01/lyl/step4_1024_multi_nodes/step4_gen_0707/DiffusionNFT/flow_grpo/gen_eval/mask2former", device=DEVICE)
    
    # Load the CLIP model used for zero-shot color classification.
    @timed
    def load_models():
        """Return (None, (clip_model, transform, tokenizer), classnames).

        The leading None is a placeholder kept for interface compatibility
        (a previous mmdet-based detector slot — see commented code above).
        """
        clip_arch = "ViT-L-14"
        # Create on the default device first, then move explicitly; passing
        # device=DEVICE to create_model_and_transforms caused problems before.
        clip_model, _, transform = open_clip.create_model_and_transforms(clip_arch, pretrained="openai")

        clip_model = clip_model.to(DEVICE)

        tokenizer = open_clip.get_tokenizer(clip_arch)

        # Same class-name list the detector uses (one name per line).
        with open(obj_names_PATH) as cls_file:
            classnames = [line.strip() for line in cls_file]

        return None, (clip_model, transform, tokenizer), classnames


    # Color vocabulary for CLIP zero-shot color classification, plus a lazily
    # filled cache of per-class zero-shot classifiers.
    COLORS = ["red", "orange", "yellow", "green", "blue", "purple", "pink", "brown", "black", "white"]
    COLOR_CLASSIFIERS = {}

    # Evaluation parts

    class ImageCrops(torch.utils.data.Dataset):
        """Dataset of CLIP-preprocessed crops, one per detected object.

        Each item composites the object's mask onto a flat background, crops
        to the bbox, and applies the closure-level CLIP ``transform``.
        """

        def __init__(self, image: Image.Image, objects):
            self._image = image.convert("RGB")
            bgcolor = "#999"
            if bgcolor == "original":
                # Unreachable with the hard-coded bgcolor above; kept as a switch.
                self._blank = self._image.copy()
            else:
                self._blank = Image.new("RGB", image.size, color=bgcolor)
            self._objects = objects

        def __len__(self):
            return len(self._objects)

        def __getitem__(self, index):
            box, mask = self._objects[index]
            if mask is not None:
                # The mask must cover the full image (H, W) for compositing.
                assert tuple(self._image.size[::-1]) == tuple(mask.shape), (index, self._image.size[::-1], mask.shape)
                image = Image.composite(self._image, self._blank, Image.fromarray(mask))
            else:
                image = self._image
            image = image.crop(box[:4])
            # `transform` is the CLIP preprocessing closure; 0 is a dummy label.
            return (transform(image), 0)


    def color_classification(image, bboxes, classname):
        """Classify the color of each detected object crop with CLIP.

        Returns a list of color names (entries of COLORS), one per element
        of *bboxes* (each element is a (bbox, mask) pair).
        """
        # Lazily build and cache a zero-shot color classifier for this class.
        if classname not in COLOR_CLASSIFIERS:
            COLOR_CLASSIFIERS[classname] = zsc.zero_shot_classifier(
                clip_model, tokenizer, COLORS,
                [
                    f"a photo of a {{c}} {classname}",
                    f"a photo of a {{c}}-colored {classname}",
                    f"a photo of a {{c}} object"
                ],
                str(DEVICE),
            )
        clf = COLOR_CLASSIFIERS[classname]

        dataloader = torch.utils.data.DataLoader(
            ImageCrops(image, bboxes), # crops each detected object out of the image
            batch_size=16, num_workers=4
        )
        with torch.no_grad():
            pred, _ = zsc.run_classification(clip_model, clf, dataloader, DEVICE)
            return [COLORS[index.item()] for index in pred.argmax(1)] # one color name per object


    def compute_iou(box_a, box_b):
        # IoU with the inclusive (+1 per side) area convention; returns 0 on
        # an empty union.
        # NOTE(review): byte-identical duplicate of the module-level
        # compute_iou; kept as-is so the two copies stay in sync. Consider
        # removing one of them.
        area_fn = lambda box: max(box[2] - box[0] + 1, 0) * max(box[3] - box[1] + 1, 0)
        i_area = area_fn([
            max(box_a[0], box_b[0]), max(box_a[1], box_b[1]),
            min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
        ])
        u_area = area_fn(box_a) + area_fn(box_b) - i_area
        return i_area / u_area if u_area else 0


    def relative_position(obj_a, obj_b):
        """Give position of A relative to B, factoring in object dimensions.

        Returns a subset of {"left of", "right of", "above", "below"}; empty
        when the deadzone-adjusted offset is negligible. Image y grows
        downward, so negative dy means "above".
        """
        boxes = np.array([obj_a[0], obj_b[0]])[:, :4].reshape(2, 2, 2)
        center_a, center_b = boxes.mean(axis=-2)
        dim_a, dim_b = np.abs(np.diff(boxes, axis=-2))[..., 0, :]
        offset = center_a - center_b
        # Shrink the offset by a deadzone proportional to the combined object
        # sizes so near-overlapping objects yield no relation.
        revised_offset = np.maximum(np.abs(offset) - POSITION_THRESHOLD * (dim_a + dim_b), 0) * np.sign(offset)
        if np.all(np.abs(revised_offset) < 1e-3):
            return set()
        # Normalize by the raw offset length before thresholding directions.
        dx, dy = revised_offset / np.linalg.norm(offset)
        relations = set()
        if dx < -0.5: relations.add("left of")
        if dx > 0.5: relations.add("right of")
        if dy < -0.5: relations.add("above")
        if dy > 0.5: relations.add("below")
        return relations


    def evaluate(image, objects, metadata):
        """Lenient evaluation: detected counts may exceed the required counts.

        Evaluate given image using detected objects on the global metadata specifications.
        Assumptions:
        * Metadata combines 'include' clauses with AND, and 'exclude' clauses with OR
        * All clauses are independent, i.e., duplicating a clause has no effect on the correctness
        * CHANGED: Color and position will only be evaluated on the most confidently predicted objects;
            therefore, objects are expected to appear in sorted order
        """
        correct = True
        reason = []
        matched_groups = []
        # Check for expected objects
        for req in metadata.get('include', []):
            classname = req['class']
            matched = True
            found_objects = objects.get(classname, [])[:req['count']]
            if len(found_objects) < req['count']:
                correct = matched = False  # fewer detections than required
                reason.append(f"expected {classname}>={req['count']}, found {len(found_objects)}")
            else:
                if 'color' in req:
                    # Color check
                    colors = color_classification(image, found_objects, classname)
                    if colors.count(req['color']) < req['count']:
                        correct = matched = False  # fewer color matches than required
                        reason.append(
                            f"expected {req['color']} {classname}>={req['count']}, found " +
                            f"{colors.count(req['color'])} {req['color']}; and " +
                            ", ".join(f"{colors.count(c)} {c}" for c in COLORS if c in colors)
                        )
                if 'position' in req and matched:
                    # Relative position check
                    expected_rel, target_group = req['position']
                    if matched_groups[target_group] is None:
                        correct = matched = False  # referenced target group failed earlier
                        reason.append(f"no target for {classname} to be {expected_rel}")
                    else:
                        for obj in found_objects:
                            for target_obj in matched_groups[target_group]:
                                true_rels = relative_position(obj, target_obj)
                                if expected_rel not in true_rels:
                                    correct = matched = False  # observed relation lacks the expected one
                                    reason.append(
                                        f"expected {classname} {expected_rel} target, found " +
                                        f"{' and '.join(true_rels)} target"
                                    )
                                    break
                            if not matched:
                                break
            if matched:
                matched_groups.append(found_objects)
            else:
                matched_groups.append(None)

        # Check for non-expected objects
        for req in metadata.get('exclude', []):
            classname = req['class']
            if len(objects.get(classname, [])) >= req['count']:  # too many of an excluded class
                correct = False
                reason.append(f"expected {classname}<{req['count']}, found {len(objects[classname])}")

        return correct, "\n".join(reason)

    def evaluate_reward(image, objects, metadata):
        """Strict evaluation: detected counts must exactly equal the required counts.

        Evaluate given image using detected objects on the global metadata specifications.
        Assumptions:
        * Metadata combines 'include' clauses with AND, and 'exclude' clauses with OR
        * All clauses are independent, i.e., duplicating a clause has no effect on the correctness
        * CHANGED: Color and position will only be evaluated on the most confidently predicted objects;
            therefore, objects are expected to appear in sorted order

        Returns:
            (correct, reward, reason): ``correct`` is True only when every
            clause matches exactly; ``reward`` averages the per-clause partial
            credits collected below; ``reason`` joins failure explanations.
        """
        correct = True  # strict full match over all clauses
        reason = []
        rewards = []
        matched_groups = []
        # Check for expected objects
        for req in metadata.get('include', []):
            classname = req['class']
            matched = True
            found_objects = objects.get(classname, [])  # detections for this class

            # 1) Count-match reward: 1 - |required - found| / required
            #    (can go negative when found > 2 * required).
            rewards.append(1 - abs(req['count'] - len(found_objects)) / req['count'])

            if len(found_objects) != req['count']:  # strict count mismatch
                correct = matched = False
                reason.append(f"expected {classname}=={req['count']}, found {len(found_objects)}")
                # A color/position requirement cannot be satisfied when the
                # count is wrong — its dependent check scores 0.
                if 'color' in req or 'position' in req:
                    rewards.append(0.0)

            # 2) Counts match — check color next, if requested.
            else:

                if 'color' in req:
                    colors = color_classification(image, found_objects, classname)  # CLIP zero-shot

                    # Color-match reward: 1 - |required - color matches| / required.
                    rewards.append(1 - abs(req['count'] - colors.count(req['color'])) / req['count'])

                    if colors.count(req['color']) != req['count']:  # strict color mismatch
                        correct = matched = False
                        # BUGFIX: message previously said ">=" although the
                        # strict check is equality.
                        reason.append(
                            f"expected {req['color']} {classname}=={req['count']}, found " +
                            f"{colors.count(req['color'])} {req['color']}; and " +
                            ", ".join(f"{colors.count(c)} {c}" for c in COLORS if c in colors)
                        )
                # 3) Counts (and color) match — check relative position.
                if 'position' in req and matched:
                    # req['position'] = (relation, index of target include-group),
                    # e.g. ["below", 0] means "below include-group 0".
                    expected_rel, target_group = req['position']

                    if matched_groups[target_group] is None:
                        correct = matched = False
                        reason.append(f"no target for {classname} to be {expected_rel}")
                        rewards.append(0.0)  # no target group to compare against

                    else:
                        for obj in found_objects:
                            for target_obj in matched_groups[target_group]:
                                true_rels = relative_position(obj, target_obj)
                                if expected_rel not in true_rels:
                                    correct = matched = False
                                    reason.append(
                                        f"expected {classname} {expected_rel} target, found " +
                                        f"{' and '.join(true_rels)} target"
                                    )
                                    rewards.append(0.0)  # position mismatch scores 0
                                    break
                            if not matched:
                                break
                        # BUGFIX: 1.0 was previously appended unconditionally,
                        # so a failed position check contributed both its 0.0
                        # penalty AND the 1.0 position reward, inflating the
                        # mean. Credit the position reward only on success.
                        if matched:
                            rewards.append(1.0)

            if matched:  # expose this group as a target for later position clauses
                matched_groups.append(found_objects)
            else:
                matched_groups.append(None)

        # Final reward: mean of all partial rewards (0 when there are none).
        reward = sum(rewards) / len(rewards) if rewards else 0

        return correct, reward, "\n".join(reason)
    
    def evaluate_image(image_pils, metadatas, only_strict):
        """Detect objects in each image and score them against its metadata.

        Returns a list of per-image result dicts with keys: tag, prompt,
        correct (lenient), strict_correct, score, reason, and JSON-serialized
        metadata/details.
        """
        # Detections are already confidence-filtered and NMS-processed.
        detected_list = mask_object_detector(image_pils, metadatas)
        ret = []

        for detected, image_pil, metadata in zip(detected_list, image_pils, metadatas):
            image = ImageOps.exif_transpose(image_pil)

            # Graded reward: count + color + position matching.
            is_strict_correct, score, reason = evaluate_reward(image, detected, metadata)

            if only_strict:  # strict-only mode: skip the lenient pass entirely
                is_correct = False
            else:
                # Lenient pass; also enforces 'exclude' clauses (count < num).
                is_correct, _ = evaluate(image, detected, metadata)

            ret.append({
                'tag': metadata['tag'],
                'prompt': metadata['prompt'],
                'correct': is_correct,  # lenient result; always False when only_strict=True
                'strict_correct': is_strict_correct,  # exact-match result
                'score': score,
                'reason': reason,
                'metadata': json.dumps(metadata),
                'details': json.dumps({
                    key: [box.tolist() for box, _ in value]
                    for key, value in detected.items()
                })
            })

        return ret

    
    # Load CLIP; the closures above capture clip_model / transform / tokenizer.
    # object_detector is always None here (legacy placeholder from load_models).
    object_detector, (clip_model, transform, tokenizer), classnames = load_models()
    # NOTE(review): these locals duplicate the module-level constants of the
    # same names. Closures in this function (e.g. relative_position's
    # POSITION_THRESHOLD) resolve to THESE enclosing-scope values, while the
    # detector class reads the module-level ones — keep the two sets in sync.
    THRESHOLD = 0.3
    COUNTING_THRESHOLD = 0.9
    MAX_OBJECTS = 16
    NMS_THRESHOLD = 1.0
    POSITION_THRESHOLD = 0.1


    @torch.no_grad()
    def compute_geneval(images, metadatas, only_strict=False):
        """Score a batch of images against their GenEval metadata.

        Returns:
            (scores, rewards, strict_rewards, grouped_rewards,
            grouped_strict_rewards): per-image lists plus per-tag dicts where
            tags not matching an image's own tag receive a -10.0 sentinel.
        """
        required_keys = ['single_object', 'two_object', 'counting', 'colors', 'position', 'color_attr']
        scores = []
        strict_rewards = []
        grouped_strict_rewards = defaultdict(list)
        rewards = []
        grouped_rewards = defaultdict(list)

        # Detection + strict/lenient evaluation (count + color + position).
        results = evaluate_image(images, metadatas, only_strict=only_strict)

        for result in results:
            strict_rewards.append(1.0 if result["strict_correct"] else 0.0)  # exact-match reward
            scores.append(result["score"])  # graded score (count + color + position)
            rewards.append(1.0 if result["correct"] else 0.0)  # lenient-match reward
            tag = result["tag"]  # task category of this prompt
            for key in required_keys:
                if key != tag:  # category not under evaluation: sentinel value
                    grouped_strict_rewards[key].append(-10.0)
                    grouped_rewards[key].append(-10.0)
                else:
                    grouped_strict_rewards[tag].append(1.0 if result["strict_correct"] else 0.0)  # per-tag strict reward
                    grouped_rewards[tag].append(1.0 if result["correct"] else 0.0)  # per-tag lenient reward
        return scores, rewards, strict_rewards, dict(grouped_rewards), dict(grouped_strict_rewards)

    return compute_geneval


if __name__ == "__main__":
    data = {
        "images": [
            Image.open(
                os.path.join(
                    os.path.dirname(os.path.abspath(__file__)),
                    "test_cases/a photo of a brown giraffe and a white stop sign.png",
                )
            )
        ],
        "metadatas": [
            {
                "tag": "color_attr",
                "include": [
                    {"class": "giraffe", "count": 1, "color": "red"},
                    {"class": "stop sign", "count": 1, "color": "white"},
                ],
                "prompt": "a photo of a brown giraffe and a white stop sign",
            }
        ],
        "only_strict": False,
    }
    compute_geneval = load_geneval("cuda")
#     compute_geneval = load_geneval()
    scores, rewards, strict_rewards, group_rewards, group_strict_rewards = compute_geneval(**data)
    print(f"Score: {scores}")
    print(f"Reward: {rewards}")
    print(f"Strict reward: {strict_rewards}")
    print(f"Group reward: {group_rewards}")
    print(f"Group strict reward: {group_strict_rewards}")
