import os
import cv2
import numpy as np
import torch
from ultralytics import YOLO as YOLO_STD
from scipy.ndimage import label
import torchvision
import time
import copy
from collections import defaultdict
# ============ Load the RGBD model package ============
import importlib.util
import sys
import os
# Load a customized ultralytics fork (RGBD-capable) from an explicit path so it
# can coexist with the standard `ultralytics` package imported above.
ultra_rgbd_path = '/path_to/yolo12/ultralytics_RGBD/ultralytics_rgbd'
init_file = os.path.join(ultra_rgbd_path, '__init__.py')

spec = importlib.util.spec_from_file_location("ultralytics_rgbd",init_file)
ultralytics_rgbd = importlib.util.module_from_spec(spec)
sys.modules["ultralytics_rgbd"] = ultralytics_rgbd  # register first to avoid re-loading on nested imports
spec.loader.exec_module(ultralytics_rgbd)

# YOLO class from the RGBD fork (4-channel aware), distinct from YOLO_STD.
YOLO_RGBD = ultralytics_rgbd.YOLO


# ============ Parameter configuration ============
component_model_path='component_model.pt'
subcp_model_path = 'subcp_model.pt'
slice_size = 1024   # tile edge length used when slicing the large image
overlap = 102       # overlap (pixels) between adjacent tiles

component_resize = (640, 640)  # size each cropped component is resized to

vis_save_path = "/path_to//vis_bg_real_d1"
slice_save_path="/path_to//slice_bg_real_d1"
big_mask_save_path='/path_to//bigmask'
os.makedirs(vis_save_path, exist_ok=True)
os.makedirs(slice_save_path, exist_ok=True)
# Bug fix: main() writes the full-size mask into big_mask_save_path, but the
# directory was never created, so cv2.imwrite would silently fail / error.
os.makedirs(big_mask_save_path, exist_ok=True)

# ============ Load the models ============
component_model = YOLO_RGBD(component_model_path)
component_model.to("cuda:0")
subcp_model = YOLO_RGBD(subcp_model_path)
subcp_model.to("cuda:0")
# ============ Helper functions ============

def slice_image(img, slice_size=1024, overlap=102):
    """Split an image into overlapping square tiles.

    Border tiles are shifted back so every tile is slice_size x slice_size
    whenever the image is large enough.

    Args:
        img: array of shape (H, W[, C]).
        slice_size: tile edge length in pixels.
        overlap: overlap between adjacent tiles in pixels.

    Returns:
        (slices, positions): list of tile arrays and list of their (x, y)
        top-left offsets in the original image.

    Bug fixes vs. the original:
      * images smaller than slice_size produced a NEGATIVE origin
        (x_end - slice_size < 0), which silently yielded a wrong,
        undersized slice via negative indexing — now clamped to 0;
      * back-shifted border tiles could repeat an already-emitted origin,
        producing duplicate slices — duplicates are now skipped.
    """
    h, w = img.shape[:2]
    step = slice_size - overlap
    slices = []
    positions = []
    seen = set()
    for y in range(0, h, step):
        for x in range(0, w, step):
            x_end = min(x + slice_size, w)
            y_end = min(y + slice_size, h)
            # Shift the window back so border tiles keep full size; clamp
            # at 0 so small images fall back to a single partial tile.
            x0 = max(0, x_end - slice_size) if x_end - x < slice_size else x
            y0 = max(0, y_end - slice_size) if y_end - y < slice_size else y
            if (x0, y0) in seen:
                continue  # border shifting can revisit an origin
            seen.add((x0, y0))
            slices.append(img[y0:y0 + slice_size, x0:x0 + slice_size])
            positions.append((x0, y0))
    return slices, positions

def merge_masks(mask_list, img_shape):
    """Paste per-tile masks onto a full-size canvas, combining by element-wise max.

    Args:
        mask_list: iterable of (mask, (x, y)) pairs, where (x, y) is the
            mask's top-left offset in the full image.
        img_shape: (h, w) of the output canvas.

    Returns:
        uint8 canvas holding the max over all pasted masks.
    """
    canvas = np.zeros(img_shape, dtype=np.uint8)
    for tile_mask, (off_x, off_y) in mask_list:
        rows, cols = tile_mask.shape
        region = canvas[off_y:off_y + rows, off_x:off_x + cols]
        np.maximum(region, tile_mask, out=region)  # writes through the view
    return canvas
def merge_masks_nmm(mask_list, img_shape, iou_threshold=0.01):
    """Merge tile-local masks into full-image object masks, greedily
    rejecting candidates that overlap what has already been accepted
    (non-maximum-merging style).

    Args:
        mask_list: iterable of (mask, (x, y)) pairs; mask is a 2-D array,
            (x, y) its top-left offset in the full image.
        img_shape: (h, w) of the full image.
        iou_threshold: overlap ratio above which a candidate is dropped.

    Returns:
        (merged_masks, full_mask): list of full-size single-object masks
        (one per accepted candidate) and the running union mask.
    """
    full_mask = np.zeros(img_shape, dtype=np.uint8)
    mask_info = []
    merged_masks = []

    # Record each mask with its placement and pixel area.
    for mask, (x, y) in mask_list:
        h, w = mask.shape
        mask_info.append({
            'mask': mask,
            'x': x,
            'y': y,
            'area': np.sum(mask)
        })

    # Largest-first: big objects get pasted before smaller duplicates.
    mask_info.sort(key=lambda x: x['area'], reverse=True)

    for info in mask_info:
        # Overlap between the candidate and the already-accepted union,
        # restricted to the candidate's own window.
        overlap = np.logical_and(full_mask[info['y']:info['y']+info['mask'].shape[0], 
                                          info['x']:info['x']+info['mask'].shape[1]], 
                                 info['mask'])
        # NOTE(review): the denominator uses np.sum(full_mask) over the
        # WHOLE canvas, not just this window, so the ratio shrinks as more
        # masks are accepted — confirm this is intended rather than a
        # window-local IoU.
        iou = np.sum(overlap) / (np.sum(info['mask']) + np.sum(full_mask) - np.sum(overlap))

        if iou < iou_threshold:
            # Keep the candidate as its own full-size object mask ...
            object_mask = np.zeros(img_shape, dtype=np.uint8)
            object_mask[info['y']:info['y']+info['mask'].shape[0], 
                        info['x']:info['x']+info['mask'].shape[1]] = info['mask']
            merged_masks.append(object_mask)

            # ... and stamp it into the running union mask.
            full_mask[info['y']:info['y']+info['mask'].shape[0], 
                      info['x']:info['x']+info['mask'].shape[1]] = np.logical_or(
                full_mask[info['y']:info['y']+info['mask'].shape[0], 
                          info['x']:info['x']+info['mask'].shape[1]],
                info['mask']
            )
    return merged_masks, full_mask

def get_bbox_from_mask(mask):
    """Return the (x, y, w, h) bounding box of a mask's nonzero pixels.

    Pure-NumPy equivalent of cv2.findNonZero + cv2.boundingRect.

    Args:
        mask: 2-D array; nonzero entries are foreground.

    Returns:
        (x, y, w, h) ints; (0, 0, 0, 0) for an all-zero mask — bug fix:
        previously cv2.findNonZero returned None and cv2.boundingRect(None)
        raised on empty masks.
    """
    ys, xs = np.nonzero(mask)
    if ys.size == 0:
        return 0, 0, 0, 0
    x0, y0 = int(xs.min()), int(ys.min())
    # +1 because width/height include the max pixel (boundingRect semantics).
    return x0, y0, int(xs.max()) - x0 + 1, int(ys.max()) - y0 + 1

# ============ Main pipeline ============
def get_bn_stats(model):
    """Snapshot the running statistics of every BatchNorm2d layer.

    Args:
        model: an nn.Module tree.

    Returns:
        {layer_name: {'running_mean': Tensor, 'running_var': Tensor}} with
        cloned tensors, so later updates to the model do not mutate the
        snapshot.
    """
    return {
        name: {
            'running_mean': module.running_mean.clone(),
            'running_var': module.running_var.clone(),
        }
        for name, module in model.named_modules()
        if isinstance(module, torch.nn.BatchNorm2d)
    }
def get_param_snapshot(model):
    """Return a {name: cloned tensor} snapshot of all model parameters.

    Bug fix: the previous ``name.strip()`` only removed surrounding
    whitespace (parameter names never contain any), not a module prefix as
    its comment claimed — the misleading no-op is removed.
    """
    return {name: param.clone() for name, param in model.named_parameters()}


def compare_params(before, after):
    """Return the names of parameters whose values differ between snapshots.

    Args:
        before: {name: Tensor} snapshot taken earlier.
        after: {name: Tensor} snapshot taken later (keyed the same way).

    Returns:
        List of names (in ``before``'s iteration order) where the tensors
        are not exactly equal.
    """
    return [name for name in before
            if not torch.equal(before[name], after[name])]

def compare_bn_stats(before, after):
    """Return names of BN layers whose running statistics changed.

    Args:
        before: {layer: {'running_mean': T, 'running_var': T}} snapshot.
        after: matching snapshot taken later.

    Returns:
        List of layer names where running_mean or running_var differ.
    """
    changed = []
    for layer, stats in before.items():
        ref = after[layer]
        same = (torch.equal(stats['running_mean'], ref['running_mean'])
                and torch.equal(stats['running_var'], ref['running_var']))
        if not same:
            changed.append(layer)
    return changed

import torchvision.ops as ops

def box_nmm(boxes, scores, labels, iou_threshold=0.1):
    """Non-Maximum Merging (NMM): greedily fuse overlapping boxes.

    Repeatedly takes the highest-scoring remaining box, fuses every box
    whose IoU with it exceeds ``iou_threshold`` into one enclosing box
    (score = mean of the group, label = top box's label), and removes the
    group from the pool.

    Args:
        boxes: (N, 4) tensor of [x1, y1, x2, y2].
        scores: (N,) confidence per box.
        labels: (N,) class label per box.
        iou_threshold: IoU above which boxes are merged.

    Returns:
        (merged_boxes, merged_scores, merged_labels) tensors.

    Fixes vs. the original:
      * empty input no longer crashes (``torch.stack([])`` raised);
      * outputs built with ``torch.stack`` instead of ``torch.tensor`` on
        lists of 0-dim tensors (avoids copy-construct warnings);
      * IoU computed inline in pure torch (same formula as
        torchvision.ops.box_iou), keeping the function self-contained.
    """
    if boxes.numel() == 0:
        # Bug fix: torch.stack on empty lists below would raise.
        return boxes.reshape(0, 4), scores.reshape(0), labels.reshape(0)

    out_boxes, out_scores, out_labels = [], [], []

    while boxes.size(0):
        top = int(scores.argmax())
        top_box = boxes[top]

        # IoU of the top box vs every remaining box (incl. itself, IoU=1,
        # which guarantees at least one removal per iteration).
        lt = torch.maximum(top_box[:2], boxes[:, :2])
        rb = torch.minimum(top_box[2:], boxes[:, 2:])
        wh = (rb - lt).clamp(min=0)
        inter = wh[:, 0] * wh[:, 1]
        area_top = (top_box[2] - top_box[0]) * (top_box[3] - top_box[1])
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        ious = inter / (area_top + areas - inter)

        group = ious > iou_threshold
        group_idx = group.nonzero(as_tuple=False).squeeze(1)

        if group_idx.numel() > 1:
            # Fuse the group into its enclosing box; average the scores.
            sel = boxes[group_idx]
            out_boxes.append(torch.stack([sel[:, 0].min(), sel[:, 1].min(),
                                          sel[:, 2].max(), sel[:, 3].max()]))
            out_scores.append(scores[group_idx].mean())
        else:
            out_boxes.append(top_box)
            out_scores.append(scores[top])
        out_labels.append(labels[top])  # label follows the top-scoring box

        keep = ~group
        boxes, scores, labels = boxes[keep], scores[keep], labels[keep]

    return torch.stack(out_boxes), torch.stack(out_scores), torch.stack(out_labels)
def merge_boxnm(detections_list, img_shape, iou_threshold=0.1):
    """
    Merge per-slice detections into global boxes via Non-Maximum Merging.

    Args:
        detections_list: list of (boxinfo, (ox, oy)) pairs where boxinfo is
            [x1, y1, x2, y2, score, ..., cls] in slice-local coordinates
            and (ox, oy) is that slice's top-left offset in the full image.
        img_shape: (h, w) of the original image (currently unused; kept
            for interface compatibility).
        iou_threshold: IoU threshold forwarded to box_nmm.

    Returns:
        (merged_boxes, merged_labels) tensors. Empty tensors when no
        detections were supplied — bug fix: an empty list previously
        crashed inside box_nmm (torch.stack on an empty list).
    """
    if not detections_list:
        return torch.empty((0, 4)), torch.empty(0, dtype=torch.float64)

    all_boxes = []
    all_scores = []
    all_labels = []

    for boxinfo, offset in detections_list:
        x1, y1, x2, y2 = boxinfo[:4]
        # Shift slice-local coordinates back to full-image coordinates.
        all_boxes.append([x1 + offset[0], y1 + offset[1],
                          x2 + offset[0], y2 + offset[1]])
        all_scores.append(boxinfo[4])
        all_labels.append(boxinfo[-1])

    boxes_t = torch.tensor(all_boxes)
    scores_t = torch.tensor(all_scores).to(torch.float64)
    labels_t = torch.tensor(all_labels).to(torch.float64)

    # Fuse overlapping boxes across slice boundaries.
    merged_boxes, merged_scores, merged_labels = box_nmm(
        boxes_t, scores_t, labels_t, iou_threshold)

    return merged_boxes, merged_labels
def main(large_img_path):
    """Run the two-stage PCB segmentation pipeline on one large image.

    Stage 1: tile the image, detect components per tile with
    component_model, and merge the tile-local boxes into global boxes via
    NMM. Stage 2: crop each merged component, segment its sub-components
    with subcp_model, and paste class-coded masks back into a full-size
    mask. Writes visualisations to the module-level output directories.

    Args:
        large_img_path: path to the input image. Read with
            IMREAD_UNCHANGED so an RGBD PNG keeps its 4th (depth) channel.

    Bug fix: removed a leftover debug breakpoint
    (``if comp_id==121: print("pause here")``).
    """
    large_img = cv2.imread(large_img_path,cv2.IMREAD_UNCHANGED)
    img_h, img_w = large_img.shape[:2]
    raw_model = component_model.model  # This is the nn.Module
    # Only enable BN layers to train mode (for updating running_mean/var);
    # everything else stays in eval mode and gradients are disabled.
    for m in raw_model.modules():
        if isinstance(m, torch.nn.BatchNorm2d):
            m.train()
        else:
            m.eval()
    for param in raw_model.parameters():
        param.requires_grad = False

    # 1. Split the large image into overlapping slice_size tiles
    starttime = time.time()
    slices, positions = slice_image(large_img, slice_size, overlap)

    # 2. Run component_model on each tile; collect slice-local boxes
    component_boxes = []
    component_type=[]

    for i, (slice_img, pos) in enumerate(zip(slices, positions)):
        results = component_model(slice_img, imgsz=slice_size,device="cuda:0")
        vis_img = results[0].plot()
        cv2.imwrite(os.path.join(slice_save_path, f"{i}_{pos}.png"), vis_img)
        if results[0].boxes is not None:
            for ii,box in enumerate(results[0].boxes.data):
                component_boxes.append((box.cpu().numpy(), pos))
                component_type.append(int(results[0].boxes.cls[ii]))

    endtime1 = time.time()
    # 3. Merge all component boxes into full-image coordinates
    merged_boxes, merged_labels=merge_boxnm(component_boxes, (img_h, img_w),)
    subcomponent_results = {}

    for comp_id, (box, label) in enumerate(zip(merged_boxes, merged_labels), 1):
        # Component bounding box in full-image coordinates
        left, top, right, bottom = map(int, box)

        # Crop and resize the component image
        comp_img = large_img[top:bottom, left:right].copy()
        comp_img_resized  = cv2.resize(comp_img, component_resize)

        # Segment sub-components on the resized crop with subcp_model
        results = subcp_model(comp_img_resized, imgsz=component_resize[0],device="cuda:0")
        if results[0].masks is not None:
            sub_masks = results[0].masks.data
            subcomponent_results[comp_id] = {
                'masks': sub_masks.cpu().numpy(),
                'bbox': (left, top, right-left, bottom-top),
                'cls_id': results[0].boxes.cls.cpu().numpy()
            }

    # 6. Paste sub-component masks back into a full-size class mask
    full_subcp_mask = np.zeros((img_h, img_w), dtype=np.uint8)
    for comp_id, data in subcomponent_results.items():
        x, y, w, h = data['bbox']
        for i, (sub_mask,sub_cls_id) in enumerate(zip(data['masks'],data['cls_id'])):
            sub_mask_resized = cv2.resize((sub_mask > 0.5).astype(np.uint8), (w,h))
            # Class-coded values: pin (1), pad (2), body (3)
            full_subcp_mask[y:y+h, x:x+w] = np.maximum(full_subcp_mask[y:y+h, x:x+w], sub_mask_resized * (sub_cls_id))
    endtime = time.time()
    print(f"cp time: {endtime1 - starttime:.4f} seconds")
    print(f"Run time: {endtime - endtime1:.4f} seconds")
    # 7. Visualise the segmentation (drop the depth channel for display)
    vis_img = large_img[:,:,:3].copy()

    color_map = defaultdict(lambda:  [255, 0, 0],    # blue (BGR) - body / other classes
                              {1: [0, 0, 255],   # red - pin
                               2: [0, 255, 0],   # green - pad
                               })

    # Save the raw class mask, then blend class colours into the image
    cv2.imwrite(os.path.join(big_mask_save_path,os.path.basename(large_img_path)),full_subcp_mask)
    for i in range(1,3):
        mask = (full_subcp_mask == i)
        vis_img[mask] = vis_img[mask] * 0.4 + np.array(color_map[i], dtype=np.uint8) * 0.6
    # Everything that is neither background nor pin/pad is drawn as body
    mask = (full_subcp_mask != 1) & (full_subcp_mask != 2) & (full_subcp_mask != 0)
    vis_img[mask] = vis_img[mask] * 0.4 + np.array(color_map[3], dtype=np.uint8) * 0.6

    for box in merged_boxes:
        x1, y1, x2, y2 = map(int, box)
        cv2.rectangle(vis_img, (x1, y1), (x2, y2), (255, 255, 255), 2)

    cv2.imwrite(os.path.join(vis_save_path, "result1_1.jpg"), vis_img)

    # 8. Per-component visualisation of subcp_model results
    for comp_id, data in subcomponent_results.items():
        x, y, w, h = data['bbox']
        comp_img = large_img[y:y+h, x:x+w]
        comp_vis = comp_img[:,:,:3].copy()

        for i, sub_mask in enumerate(data['masks']):
            sub_mask_resized = cv2.resize((sub_mask > 0.5).astype(np.uint8), (w, h))
            comp_vis[sub_mask_resized > 0] = comp_vis[sub_mask_resized > 0] * 0.3 + np.array(color_map[int(data['cls_id'][i])], dtype=np.uint8) * 0.7

        # Draw the component border
        cv2.rectangle(comp_vis, (0, 0), (w-1, h-1), (255, 255, 255), 2)

        # Save each component's visualisation and raw crop
        cv2.imwrite(os.path.join(vis_save_path, f"component_{comp_id}_seg.jpg"), comp_vis)
        cv2.imwrite(os.path.join(vis_save_path, f"raw_{comp_id}.jpg"), comp_img)

from pathlib import Path
from PIL import Image  # optional, for verifying files are images
from typing import Union
if __name__ == '__main__':
    # Demo entry point: run the two-stage pipeline on a single RGBD image.
    large_img_path='/path_to/PCB2步分割4/demo_rgbd.png'
   
    main(large_img_path)
