import numpy as np
import cv2
import time
from rknn.api import RKNN

# Configuration parameters
OBJ_THRESH = 0.01  # detection confidence threshold (deliberately lowered for testing)
NMS_THRESH = 0.45  # IoU threshold used by non-max suppression
MAX_DETECT = 100  # cap on the number of detections kept after NMS
INPUT_SIZE = (640, 640)  # model input size as (width, height)
SEG_THRESH = 0.5  # binarization threshold for segmentation masks
CLASS_NAME = "battery-top"  # single-class label used when drawing results

def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + e^-x).

    Inputs are clipped to [-500, 500] before exponentiation so that
    np.exp never overflows on extreme logits.
    """
    clipped = np.clip(x, -500, 500)
    return np.reciprocal(1.0 + np.exp(-clipped))

def xywh2xyxy(boxes):
    """Convert boxes from center form (cx, cy, w, h) to corner form (x1, y1, x2, y2)."""
    corners = boxes.copy()
    half_w = boxes[:, 2] / 2
    half_h = boxes[:, 3] / 2
    corners[:, 0] = boxes[:, 0] - half_w  # left edge
    corners[:, 1] = boxes[:, 1] - half_h  # top edge
    corners[:, 2] = boxes[:, 0] + half_w  # right edge
    corners[:, 3] = boxes[:, 1] + half_h  # bottom edge
    return corners

def non_max_suppression(boxes, scores, threshold=NMS_THRESH):
    """Greedy non-max suppression over (cx, cy, w, h) boxes.

    Returns the indices (into the original input arrays) of the boxes
    kept, in descending score order.
    """
    if len(boxes) == 0:
        return np.array([])

    order = np.argsort(-scores)
    corners = xywh2xyxy(boxes)[order]
    order_scores = scores[order]

    kept = []
    while corners.shape[0] > 0:
        kept.append(order[0])
        if corners.shape[0] == 1:
            break

        best = corners[0]
        rest = corners[1:]

        # Intersection rectangle between the best box and each survivor.
        ix1 = np.maximum(best[0], rest[:, 0])
        iy1 = np.maximum(best[1], rest[:, 1])
        ix2 = np.minimum(best[2], rest[:, 2])
        iy2 = np.minimum(best[3], rest[:, 3])
        inter = np.maximum(0, ix2 - ix1) * np.maximum(0, iy2 - iy1)

        best_area = (best[2] - best[0]) * (best[3] - best[1])
        rest_area = (rest[:, 2] - rest[:, 0]) * (rest[:, 3] - rest[:, 1])
        iou = inter / np.maximum(best_area + rest_area - inter, 1e-7)

        # Keep only boxes that overlap the best one below the threshold.
        survivors = iou < threshold
        corners = rest[survivors]
        order_scores = order_scores[1:][survivors]
        order = order[1:][survivors]

    return np.array(kept)

def preprocess_image(image, input_size=(640, 640)):
    """Standard YOLOv8 letterbox preprocessing.

    Scales the image to fit input_size while keeping aspect ratio,
    centers it on a gray (114) canvas, and returns a normalized NHWC
    float32 tensor plus the letterbox parameters.

    Returns:
        (tensor, scale, pad_x, pad_y)
    """
    src_h, src_w = image.shape[:2]
    ratio = min(input_size[0] / src_w, input_size[1] / src_h)

    dst_w = int(src_w * ratio)
    dst_h = int(src_h * ratio)

    scaled = cv2.resize(image, (dst_w, dst_h))

    # Gray canvas with the scaled image centered on it.
    canvas = np.full((input_size[1], input_size[0], 3), 114, dtype=np.uint8)
    off_x = (input_size[0] - dst_w) // 2
    off_y = (input_size[1] - dst_h) // 2
    canvas[off_y:off_y + dst_h, off_x:off_x + dst_w] = scaled

    # Scale to [0, 1] and add the batch dimension (NHWC layout).
    tensor = np.expand_dims(canvas.astype(np.float32) / 255.0, axis=0)

    return tensor, ratio, off_x, off_y

def decode_segmentation_mask(output1, mask_coeffs, boxes, img_size, scale, pad_x, pad_y):
    """Decode YOLOv8-seg prototype masks into per-detection binary masks.

    Args:
        output1: mask prototypes; accepted layouts are (1, 32, 160, 160),
            (1, 160, 160, 32) or (32, 160, 160).
        mask_coeffs: (N, 32) per-detection mask coefficients.
        boxes: (N, 4) boxes in model-input space as (cx, cy, w, h).
        img_size: original image size as (width, height).
        scale, pad_x, pad_y: letterbox parameters from preprocess_image.

    Returns:
        List of (x1, y1, x2, y2, mask) tuples in original-image
        coordinates, where mask is a uint8 {0, 255} array sized to the box.
    """
    # Normalize prototypes to channel-major (32, H*W).
    # FIX: the previous code transposed to HWC and then did
    # reshape(-1, 160*160), which interleaves channels for a C-contiguous
    # HWC array and corrupts every decoded mask. Prototypes must be
    # flattened per channel (CHW -> (32, H*W)).
    if len(output1.shape) == 4:
        if output1.shape[1] == 32:  # (1, 32, 160, 160)
            protos = output1[0]
        else:  # (1, 160, 160, 32)
            protos = output1[0].transpose(2, 0, 1)
    else:  # (32, 160, 160)
        protos = output1
    proto_h, proto_w = protos.shape[1], protos.shape[2]
    protos_flat = protos.reshape(protos.shape[0], -1)

    # Ratio from model-input coordinates (640) to the prototype grid.
    ratio_x = proto_w / 640.0
    ratio_y = proto_h / 640.0

    seg_results = []

    for i, (box, coeff) in enumerate(zip(boxes, mask_coeffs)):
        try:
            # Linear combination of prototypes, then sigmoid + threshold.
            mask = np.matmul(coeff.reshape(1, -1), protos_flat)
            mask = sigmoid(mask).reshape(proto_h, proto_w)
            mask = (mask > SEG_THRESH).astype(np.uint8) * 255

            cx, cy, w, h = box
            x1, y1 = cx - w / 2, cy - h / 2
            x2, y2 = cx + w / 2, cy + h / 2

            # FIX: crop the mask to the box region (in prototype-grid
            # coordinates) before resizing. Resizing the whole 160x160
            # mask to the box size misaligns the mask with the object.
            mx1 = max(0, int(x1 * ratio_x))
            my1 = max(0, int(y1 * ratio_y))
            mx2 = min(proto_w, int(np.ceil(x2 * ratio_x)))
            my2 = min(proto_h, int(np.ceil(y2 * ratio_y)))
            if mx2 <= mx1 or my2 <= my1:
                continue
            mask_crop = mask[my1:my2, mx1:mx2]

            # Map the box back to original-image coordinates and clamp.
            x1_orig = max(0, (x1 - pad_x) / scale)
            y1_orig = max(0, (y1 - pad_y) / scale)
            x2_orig = min(img_size[0], (x2 - pad_x) / scale)
            y2_orig = min(img_size[1], (y2 - pad_y) / scale)

            w_actual = int(x2_orig - x1_orig)
            h_actual = int(y2_orig - y1_orig)

            if w_actual > 0 and h_actual > 0:
                mask_resized = cv2.resize(mask_crop, (w_actual, h_actual))
                seg_results.append((x1_orig, y1_orig, x2_orig, y2_orig, mask_resized))

        except Exception as e:
            print(f"处理掩码 {i} 时出错: {e}")
            continue

    return seg_results

def smart_threshold_selection(confidences, raw_confidences=None):
    """Heuristically pick a confidence threshold for detections.

    Works around quantized model exports whose sigmoid confidences
    collapse into a very narrow band: when the spread (std) is tiny it
    falls back to the raw logits, then a softmax transform, then an
    extremely low fixed threshold, in that order.

    Args:
        confidences: sigmoid-activated confidences, shape (N,).
        raw_confidences: optional raw logits of the same shape.

    Returns:
        (confidences, threshold): the possibly substituted confidence
        array and the threshold to apply to it.
    """
    print(f"置信度分析:")
    print(f"  范围: [{confidences.min():.6f}, {confidences.max():.6f}]")
    print(f"  均值: {confidences.mean():.6f}")
    print(f"  标准差: {confidences.std():.6f}")
    
    # A very small std means all values are nearly identical, which
    # suggests a quantization problem in the exported model.
    if confidences.std() < 0.001:
        print("检测到置信度标准差过小，可能存在量化问题")
        
        if raw_confidences is not None:
            print(f"原始置信度范围: [{raw_confidences.min():.6f}, {raw_confidences.max():.6f}]")
            print(f"原始置信度标准差: {raw_confidences.std():.6f}")
            
            # If the raw logits have a better spread, use them instead.
            if raw_confidences.std() > confidences.std():
                print("使用原始置信度值")
                confidences = raw_confidences
                # Use a percentile as the threshold (keep roughly the top 5%).
                threshold = np.percentile(confidences, 95)
                print(f"动态阈值(95th percentile): {threshold:.6f}")
                return confidences, threshold
        
        # Next fallback: try a softmax transform of the raw logits.
        print("尝试使用原始logits进行softmax变换")
        if raw_confidences is not None:
            # Max-subtracted softmax (numerically stable) instead of sigmoid.
            exp_vals = np.exp(raw_confidences - np.max(raw_confidences))
            softmax_conf = exp_vals / np.sum(exp_vals)
            if softmax_conf.std() > confidences.std():
                print("使用softmax变换")
                confidences = softmax_conf
                threshold = np.percentile(confidences, 99)
                print(f"Softmax阈值(99th percentile): {threshold:.6f}")
                return confidences, threshold
        
        # Last resort: accept nearly everything with a tiny threshold.
        threshold = 0.0001
        print(f"使用极低阈值: {threshold}")
        return confidences, threshold
    
    # Normal case: keep the standard threshold, or derive a dynamic one
    # when nothing clears it.
    high_conf_count = np.sum(confidences > OBJ_THRESH)
    if high_conf_count == 0:
        # No high-confidence detections; take the top 0.5% instead,
        # clamped to a sane [0.001, 0.5] range.
        threshold = np.percentile(confidences, 99.5)
        threshold = max(0.001, min(0.5, threshold))
        print(f"动态阈值(99.5th percentile): {threshold:.6f}")
        return confidences, threshold
    else:
        print(f"使用标准阈值: {OBJ_THRESH}")
        return confidences, OBJ_THRESH

def post_process_complete(output0, output1, orig_img_size, scale, pad_x, pad_y):
    """Full detection post-processing: decode, threshold, filter, NMS, masks.

    Args:
        output0: detection head output; (1, 37, 8400), (1, 8400, 37), or
            2-D variants. Rows are [cx, cy, w, h, conf, 32 mask coeffs]
            for a single-class segmentation model.
        output1: mask prototype tensor (forwarded to decode_segmentation_mask).
        orig_img_size: original image size as (width, height).
        scale, pad_x, pad_y: letterbox parameters from preprocess_image.

    Returns:
        (boxes, confidences, seg_results). Boxes stay in model-input
        (cx, cy, w, h) coordinates; seg_results are in original-image
        coordinates.
    """
    print(f"输出形状: output0={output0.shape}, output1={output1.shape}")
    
    # Normalize output0 to shape (37, 8400) regardless of export layout.
    if len(output0.shape) == 3:
        if output0.shape[1] == 37:  # (1, 37, 8400)
            output0 = output0[0]
        else:  # (1, 8400, 37)
            output0 = output0[0].T
    elif len(output0.shape) == 2:
        if output0.shape[0] == 8400:  # (8400, 37)
            output0 = output0.T
    
    print(f"处理后output0形状: {output0.shape}")
    
    # Split rows: 4 box values, 1 confidence, 32 mask coefficients.
    boxes = output0[:4].T  # (8400, 4)
    raw_confidences = output0[4]  # (8400,)
    mask_coeffs = output0[5:37].T  # (8400, 32)
    
    # Apply sigmoid to the raw confidence row.
    # NOTE(review): official YOLOv8 exports usually emit already-sigmoided
    # scores — confirm against this model's export settings.
    confidences = sigmoid(raw_confidences)
    
    # Pick a usable threshold (handles badly quantized exports).
    confidences, threshold = smart_threshold_selection(confidences, raw_confidences)
    
    # Apply the chosen threshold.
    keep_indices = confidences > threshold
    print(f"阈值筛选: {np.sum(keep_indices)} / {len(confidences)} 个目标")
    
    if np.sum(keep_indices) == 0:
        print("没有目标通过阈值筛选，尝试更宽松的条件")
        # Fall back to the 10 highest-confidence candidates.
        top_indices = np.argsort(-confidences)[:10]
        keep_indices = np.zeros(len(confidences), dtype=bool)
        keep_indices[top_indices] = True
        print(f"使用top-10策略，选择了{np.sum(keep_indices)}个目标")
    
    boxes = boxes[keep_indices]
    confidences = confidences[keep_indices]
    mask_coeffs = mask_coeffs[keep_indices]
    
    if len(boxes) == 0:
        return np.array([]), np.array([]), []
    
    # Log details of the first few surviving detections.
    print("前5个检测结果:")
    for i in range(min(5, len(boxes))):
        x, y, w, h = boxes[i]
        conf = confidences[i]
        print(f"  {i}: 中心=({x:.1f}, {y:.1f}), 尺寸=({w:.1f}x{h:.1f}), 置信度={conf:.4f}")
    
    # Drop degenerate boxes: tiny (<=5px) sides, or center/size values
    # outside the 640x640 model-input canvas.
    valid_mask = (boxes[:, 2] > 5) & (boxes[:, 3] > 5) & \
                 (boxes[:, 0] > 0) & (boxes[:, 1] > 0) & \
                 (boxes[:, 0] < 640) & (boxes[:, 1] < 640) & \
                 (boxes[:, 2] < 640) & (boxes[:, 3] < 640)
    
    boxes = boxes[valid_mask]
    confidences = confidences[valid_mask]
    mask_coeffs = mask_coeffs[valid_mask]
    
    print(f"有效性筛选后: {len(boxes)} 个目标")
    
    if len(boxes) == 0:
        return np.array([]), np.array([]), []
    
    # Non-max suppression; returns indices into the filtered arrays.
    keep_indices = non_max_suppression(boxes, confidences)
    if len(keep_indices) == 0:
        return np.array([]), np.array([]), []
        
    boxes = boxes[keep_indices]
    confidences = confidences[keep_indices]
    mask_coeffs = mask_coeffs[keep_indices]
    
    print(f"NMS筛选后: {len(boxes)} 个目标")
    
    # Cap the number of detections (arrays are already score-ordered by NMS).
    if len(boxes) > MAX_DETECT:
        boxes = boxes[:MAX_DETECT]
        confidences = confidences[:MAX_DETECT]
        mask_coeffs = mask_coeffs[:MAX_DETECT]
    
    # Decode the per-detection segmentation masks.
    seg_results = decode_segmentation_mask(output1, mask_coeffs, boxes, orig_img_size, scale, pad_x, pad_y)
    print(f"成功解码 {len(seg_results)} 个分割掩码")
    
    return boxes, confidences, seg_results

def visualize_results_complete(image, boxes, confidences, seg_results, scale, pad_x, pad_y):
    """Draw segmentation masks and labeled detection boxes on a copy of the image.

    Args:
        image: original BGR image.
        boxes: (N, 4) boxes in model-input space as (cx, cy, w, h).
        confidences: (N,) scores aligned with boxes.
        seg_results: list of (x1, y1, x2, y2, mask) tuples already in
            original-image coordinates (from decode_segmentation_mask).
        scale, pad_x, pad_y: letterbox parameters used to map boxes back
            to original-image coordinates.

    Returns:
        Annotated copy of the input image.
    """
    result_img = image.copy()
    
    print(f"可视化 {len(boxes)} 个检测框和 {len(seg_results)} 个分割掩码")
    
    # Draw segmentation masks first so the boxes render on top of them.
    for i, (x1, y1, x2, y2, mask) in enumerate(seg_results):
        try:
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
            
            if x2 > x1 and y2 > y1 and x1 >= 0 and y1 >= 0 and x2 <= image.shape[1] and y2 <= image.shape[0]:
                region = result_img[y1:y2, x1:x2]
                
                if region.shape[0] > 0 and region.shape[1] > 0:
                    # Resize the mask to match the region if sizes differ.
                    if mask.shape != region.shape[:2]:
                        mask = cv2.resize(mask, (region.shape[1], region.shape[0]))
                    
                    # Solid-color (green in BGR) overlay for mask pixels.
                    mask_colored = np.zeros_like(region)
                    mask_colored[..., 1] = 255  # green channel
                    
                    # Alpha-blend the overlay only where the mask is set.
                    mask_bool = mask > 128
                    alpha = 0.3
                    result_img[y1:y2, x1:x2][mask_bool] = \
                        (1 - alpha) * region[mask_bool] + alpha * mask_colored[mask_bool]
                    
                    print(f"分割掩码 {i}: 位置=({x1}, {y1}, {x2}, {y2}), 掩码大小={mask.shape}")
                    
        except Exception as e:
            print(f"绘制分割掩码 {i} 时出错: {e}")
            continue
    
    # Then draw the detection boxes and labels.
    for i, (box, score) in enumerate(zip(boxes, confidences)):
        x, y, w, h = box
        x1, y1, x2, y2 = x - w/2, y - h/2, x + w/2, y + h/2
        
        # Map from letterboxed model-input space back to the original image.
        x1_orig = (x1 - pad_x) / scale
        y1_orig = (y1 - pad_y) / scale
        x2_orig = (x2 - pad_x) / scale
        y2_orig = (y2 - pad_y) / scale
        
        # Integer coordinates clamped to the image bounds (box kept non-empty).
        orig_h, orig_w = image.shape[:2]
        x1_int = max(0, min(orig_w - 1, int(x1_orig)))
        y1_int = max(0, min(orig_h - 1, int(y1_orig)))
        x2_int = max(x1_int + 1, min(orig_w, int(x2_orig)))
        y2_int = max(y1_int + 1, min(orig_h, int(y2_orig)))
        
        # Bounding box outline.
        cv2.rectangle(result_img, (x1_int, y1_int), (x2_int, y2_int), (0, 255, 0), 2)
        
        # Filled label background plus the class/confidence text.
        label = f"{CLASS_NAME}: {score:.3f}"
        label_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)[0]
        cv2.rectangle(result_img, (x1_int, y1_int - label_size[1] - 10), 
                     (x1_int + label_size[0], y1_int), (0, 255, 0), -1)
        cv2.putText(result_img, label, (x1_int, y1_int - 5), 
                   cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)
        
        print(f"检测框 {i}: 模型=({x:.1f}, {y:.1f}, {w:.1f}, {h:.1f}) -> "
              f"原图=({x1_orig:.1f}, {y1_orig:.1f}, {x2_orig:.1f}, {y2_orig:.1f}), "
              f"置信度={score:.3f}")
    
    return result_img

def detect_and_segment_complete(model_path, image_path, save_path="result.jpg"):
    """Run the full detection + segmentation pipeline on one image.

    Loads the image, letterboxes it, runs RKNN inference (RK3588),
    post-processes the outputs, draws boxes/masks, and writes the result
    to save_path.

    Returns:
        The annotated BGR image, or None on any failure.
    """
    start_time = time.time()
    
    # Load the input image (BGR, as read by OpenCV).
    # NOTE(review): the tensor is fed to the model in BGR channel order;
    # confirm the RKNN model was configured for BGR input.
    image = cv2.imread(image_path)
    if image is None:
        print(f"错误: 无法加载图像 {image_path}")
        return None
    
    print(f"图像尺寸: {image.shape}")
    orig_size = image.shape[1], image.shape[0]  # (width, height)
    
    # Letterbox preprocessing to the model input size.
    processed, scale, pad_x, pad_y = preprocess_image(image, INPUT_SIZE)
    print(f"预处理参数: scale={scale:.3f}, pad_x={pad_x}, pad_y={pad_y}")
    
    # Model inference through the RKNN runtime.
    print("进行模型推理...")
    try:
        rknn = RKNN()
        rknn.load_rknn(model_path)
        # Try device 1 first, then fall back to the default device.
        ret = rknn.init_runtime(target="rk3588", device_id=1)
        if ret != 0:
            ret = rknn.init_runtime(target="rk3588")
        
        outputs = rknn.inference(inputs=[processed])
        
        if len(outputs) >= 2:
            output0 = outputs[0]  # detection head
            output1 = outputs[1]  # mask prototypes
            print("RKNN推理成功")
        else:
            print(f"输出数量不足")
            rknn.release()
            return None
            
        rknn.release()
        
    except Exception as e:
        print(f"RKNN推理失败: {e}")
        return None
    
    # Full post-processing (thresholding, NMS, mask decoding).
    print("后处理中...")
    boxes, confidences, seg_results = post_process_complete(output0, output1, orig_size, scale, pad_x, pad_y)
    
    # Draw masks and boxes onto a copy of the original image.
    print("可视化结果...")
    result_img = visualize_results_complete(image, boxes, confidences, seg_results, scale, pad_x, pad_y)
    
    # Save the annotated image to disk.
    cv2.imwrite(save_path, result_img)
    print(f"结果保存至 {save_path}")
    
    # Report total wall time (includes model load, not just inference).
    inference_time = time.time() - start_time
    print(f"检测完成: 发现 {len(boxes)} 个目标, {len(seg_results)} 个分割结果 | 耗时: {inference_time:.2f}秒")
    
    return result_img

if __name__ == "__main__":
    # Paths: RKNN model, input image, and output file for the overlay.
    MODEL_PATH = "./models/yolov8_seg.rknn"
    IMAGE_PATH = "./data/Color/1.png"
    OUTPUT_PATH = "detection_result.jpg"
    
    result = detect_and_segment_complete(MODEL_PATH, IMAGE_PATH, OUTPUT_PATH)
    
    # Show the annotated image in a window until a key is pressed.
    if result is not None:
        cv2.imshow("Battery Top Detection", result)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    else:
        print("检测失败")