import os
import cv2
import sys
import argparse
import numpy as np
import torch
import torchvision
import torch.nn.functional as F

# Import helpers directly from the local py_utils package
from py_utils.coco_utils import COCO_test_helper
from py_utils.rknn_executor import RKNN_model_container

# Minimum combined (class score x objectness) value for a detection to survive
# filter_boxes.
OBJ_THRESH = 0.6
# IoU threshold for non-maximum suppression.
# NOTE(review): no NMS call is visible in this file — post_process keeps only
# the single top-scoring detection — so this constant appears unused here.
NMS_THRESH = 0.45
# Maximum number of detections to keep.
# NOTE(review): also appears unused; post_process hard-codes "top 1" instead.
MAX_DETECT = 1
# Model input resolution. NOTE(review): used as both (w, h) and (h, w) in
# different call sites below; harmless while square — confirm the convention.
IMG_SIZE = (640, 640)

# Single-class model: only one label.
CLASSES = ("battery-top",)

class Colors:
    """Fixed colour palette keyed by class index (wraps around the palette)."""

    def __init__(self):
        hexs = ('FF3838',)
        palette = []
        for code in hexs:
            palette.append(self.hex2rgb(f'#{code}'))
        self.palette = palette
        self.n = len(self.palette)

    def __call__(self, i, bgr=False):
        """Return the colour for class *i* as an (R, G, B) or (B, G, R) tuple."""
        r, g, b = self.palette[int(i) % self.n]
        if bgr:
            return (b, g, r)
        return (r, g, b)

    @staticmethod
    def hex2rgb(h):
        """Convert '#RRGGBB' into an (R, G, B) tuple of ints."""
        return tuple(int(h[pos + 1:pos + 3], 16) for pos in (0, 2, 4))

def sigmoid(x):
    """Element-wise logistic function 1 / (1 + e^-x); works on scalars and arrays."""
    return np.reciprocal(1.0 + np.exp(-x))

def filter_boxes(boxes, box_confidences, box_class_probs, seg_part):
    """Keep detection candidates whose combined score reaches OBJ_THRESH.

    boxes:            (N, 4) xyxy candidates.
    box_confidences:  per-candidate confidence values (flattened to 1-D here).
    box_class_probs:  (N, num_classes) per-class scores.
    seg_part:         (N, mask_dim) mask coefficients.

    Returns the surviving (boxes, classes, scores, seg_part); all four are
    empty arrays when nothing passes the threshold.
    """
    print(f"filter_boxes input shapes: boxes={boxes.shape}, box_confidences={box_confidences.shape}, box_class_probs={box_class_probs.shape}, seg_part={seg_part.shape}")
    box_confidences = box_confidences.reshape(-1)
    # Best class (and its score) per candidate.
    class_max_score = np.max(box_class_probs, axis=-1)
    classes = np.argmax(box_class_probs, axis=-1)
    # NOTE(review): the caller passes sigmoid-activated confidences but raw
    # class scores, so this product mixes activated and raw values — confirm
    # OBJ_THRESH was tuned against exactly this combination.
    _class_pos = np.where(class_max_score * box_confidences >= OBJ_THRESH)[0]
    print(f"_class_pos: {_class_pos}")
    if len(_class_pos) == 0:
        return np.array([]), np.array([]), np.array([]), np.array([])
    scores = sigmoid(class_max_score * box_confidences)[_class_pos]
    boxes = boxes[_class_pos]
    classes = classes[_class_pos]
    seg_part = seg_part[_class_pos]
    print(f"filter_boxes output shapes: boxes={boxes.shape}, classes={classes.shape}, scores={scores.shape}, seg_part={seg_part.shape}")
    return boxes, classes, scores, seg_part

def dfl(position):
    """Distribution Focal Loss decoding.

    position: ndarray shaped (n, 4*mc, h, w), holding for each of the four box
    sides a discrete distribution over mc bins.

    Returns an (n, 4, h, w) ndarray containing the softmax-weighted
    expectation (sum_i i * p_i) of each per-side distribution.
    """
    x = torch.tensor(position)
    n, c, h, w = x.shape
    p_num = 4          # four box sides (left, top, right, bottom offsets)
    mc = c // p_num    # number of bins per side
    y = x.reshape(n, p_num, mc, h, w).softmax(2)
    # torch.arange replaces the non-idiomatic torch.tensor(range(mc)).float().
    acc_matrix = torch.arange(mc, dtype=torch.float32).reshape(1, 1, mc, 1, 1)
    return (y * acc_matrix).sum(2).numpy()

def box_process(position):
    """Decode DFL regression output into xyxy boxes in input-image pixels."""
    if len(position.shape) == 3:
        # Flattened (batch, channels, anchors) layout: restore the 2-D grid.
        batch, channels, num_anchors = position.shape
        # NOTE(review): hard-coded grid size — confirm it matches this model.
        grid_h, grid_w = 80, 105
        if grid_h * grid_w != num_anchors:
            raise ValueError(f"Cannot reshape {num_anchors} into grid_h={grid_h}, grid_w={grid_w}")
        position = position.reshape(batch, channels, grid_h, grid_w)
    else:
        grid_h, grid_w = position.shape[2:4]

    print(f"box_process input shape: {position.shape}")
    # Per-cell (x, y) anchor-centre grid, channel-stacked as (1, 2, h, w).
    xs, ys = np.meshgrid(np.arange(grid_w), np.arange(grid_h))
    grid = np.concatenate(
        (xs.reshape(1, 1, grid_h, grid_w), ys.reshape(1, 1, grid_h, grid_w)),
        axis=1,
    )
    # NOTE(review): this pairs IMG_SIZE[1] with grid_h and IMG_SIZE[0] with
    # grid_w; identical while the input is square (640x640), but the indexing
    # looks swapped for non-square inputs — confirm before generalising.
    stride = np.array([IMG_SIZE[1] // grid_h, IMG_SIZE[0] // grid_w]).reshape(1, 2, 1, 1)
    distances = dfl(position)
    top_left = grid + 0.5 - distances[:, 0:2, :, :]
    bottom_right = grid + 0.5 + distances[:, 2:4, :, :]
    xyxy = np.concatenate((top_left * stride, bottom_right * stride), axis=1)
    print(f"box_process output shape: {xyxy.shape}")
    return xyxy

def post_process(input_data):
    """Decode raw model outputs into (boxes, classes, scores, seg_masks).

    input_data must be a 2-element sequence:
      output: detection head, sliced below into box regressions, class scores
              and mask coefficients along the channel axis;
      proto:  mask prototypes of shape (..., ph, pw).

    Returns (None, None, None, None) when nothing passes the threshold;
    otherwise only the single highest-scoring detection is kept (each return
    value is a length-1 array / one mask).
    """
    if len(input_data) == 2:
        output, proto = input_data
        num_classes = 1
        # NOTE(review): reg_max=4 gives 16 box channels; stock YOLOv8 exports
        # typically use reg_max=16 (64 channels) — confirm against the model.
        reg_max = 4
        proto_channels = 32
        box_channels = 4 * reg_max
        cls_channels = num_classes
        
        print(f"post_process input shapes: output={output.shape}, proto={proto.shape}")
        # Channel layout: [box regressions | class scores | mask coefficients].
        boxes = output[:, :box_channels, :]
        classes_conf = output[:, box_channels:box_channels + cls_channels, :]
        seg_part = output[:, box_channels + cls_channels:, :]
        
        boxes = box_process(boxes)
        # (n, 4, h, w) -> one xyxy row per anchor.
        boxes = boxes.transpose(0, 2, 3, 1).reshape(-1, 4)
        classes_conf = sp_flatten(classes_conf)
        seg_part = sp_flatten(seg_part)
        scores = sigmoid(classes_conf)
        
        print(f"post_process pre-filter shapes: boxes={boxes.shape}, classes_conf={classes_conf.shape}, seg_part={seg_part.shape}")
        # Note: sigmoid(scores) goes in as confidences, raw classes_conf as
        # class probabilities — see the matching note in filter_boxes.
        boxes, classes, scores, seg_part = filter_boxes(boxes, scores, classes_conf, seg_part)
        
        if len(boxes) == 0:
            return None, None, None, None
        
        # Keep only the top-scoring detection (sort by score, take first).
        zipped = zip(boxes, classes, scores, seg_part)
        sort_zipped = sorted(zipped, key=lambda x: x[2], reverse=True)
        boxes, classes, scores, seg_part = sort_zipped[0]
        boxes = np.array([boxes])
        classes = np.array([classes])
        scores = np.array([scores])
        seg_part = np.array([seg_part])
        
        # Combine mask coefficients with prototypes: (1, k) @ (k, ph*pw).
        ph, pw = proto.shape[-2:]
        proto = proto[:, :seg_part.shape[-1], :, :]
        proto = proto.reshape(seg_part.shape[-1], -1)
        seg_img = np.matmul(seg_part, proto)
        seg_img = sigmoid(seg_img)
        seg_img = seg_img.reshape(-1, ph, pw)
        seg_threadhold = 0.4
        # Upsample the low-res mask to the network input size, crop it to the
        # detection box, then binarise.
        seg_img = F.interpolate(torch.tensor(seg_img)[None], torch.Size([IMG_SIZE[1], IMG_SIZE[0]]), mode='bilinear', align_corners=False)[0]
        seg_img_t = _crop_mask(seg_img, torch.tensor(boxes))
        seg_img = seg_img_t.numpy()
        seg_img = (seg_img > seg_threadhold).astype(np.uint8)
        if seg_img.ndim == 2:
            seg_img = seg_img[None, :, :]
        print(f"post_process: final seg_img shape={seg_img.shape}")
        return boxes, classes, scores, seg_img
    
    else:
        raise ValueError(f"Expected 2 model outputs, got {len(input_data)}")

def sp_flatten(_in):
    """Flatten a (batch, channels, anchors) tensor into (batch*anchors, channels)."""
    channels = _in.shape[1]
    moved = _in.transpose(0, 2, 1)
    return moved.reshape(-1, channels)

def draw(image, boxes, scores, classes):
    """Render each detection (xyxy box plus class/score label) onto *image* in place."""
    for box, score, cls_id in zip(boxes, scores, classes):
        # Boxes are xyxy; the original code called these top/left/right/bottom,
        # which was misleading.
        x1, y1, x2, y2 = (int(v) for v in box)
        print("%s @ (%d %d %d %d) %.3f" % (CLASSES[cls_id], x1, y1, x2, y2, score))
        cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 2)
        cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cls_id], score),
                    (x1, y1 - 6), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

def _crop_mask(masks, boxes):
    n, h, w = masks.shape
    x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1)
    r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :]
    c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None]
    masks = masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
    print(f"_crop_mask: masks shape={masks.shape}, boxes={boxes}")
    return masks

def merge_seg(image, seg_img, classes):
    """Overlay each binary mask, tinted with its class colour, onto *image*."""
    palette = Colors()
    for idx in range(len(seg_img)):
        mask = seg_img[idx].astype(np.uint8)
        tinted = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
        # NOTE(review): palette() returns RGB by default while the image is
        # BGR — with the single red entry this tints channels accordingly.
        tinted = (tinted * palette(classes[idx])).astype(np.uint8)
        image = cv2.add(image, tinted)
    return image

def setup_model(args):
    """Load the RKNN model named by ``args.model_path``.

    Returns a (model, platform) pair. Raises ValueError when the path is not
    a .rknn file — the original ``assert False`` would be silently stripped
    under ``python -O``, leaving ``platform`` undefined.
    """
    model_path = args.model_path
    if not model_path.endswith('.rknn'):
        raise ValueError("{} is not rknn model".format(model_path))
    platform = 'rknn'
    model = RKNN_model_container(args.model_path, args.target, args.device_id)
    print('Model-{} is {} model, starting val'.format(model_path, platform))
    return model, platform

def img_check(path):
    """Return True if *path* has a supported image extension (case-insensitive).

    ``str.endswith`` accepts a tuple of suffixes, and lower-casing first also
    accepts mixed-case extensions (e.g. '.Jpg') that the old
    all-lower/all-upper check missed.
    """
    img_type = ('.jpg', '.jpeg', '.png', '.bmp')
    return path.lower().endswith(img_type)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Process YOLOv8 segmentation.')
    parser.add_argument('--model_path', type=str, default='/root/ros_ws/src/OrbbecSDK_ROS1/scripts/models/merge_yolov8_seg.rknn', help='model path, should be .rknn file')
    parser.add_argument('--target', type=str, default='rk3588', help='target RKNPU platform')
    parser.add_argument('--device_id', type=str, default=None, help='device id')
    parser.add_argument('--img_show', action='store_true', default=False, help='draw the result and show')
    parser.add_argument('--img_save', action='store_true', default=True, help='save the result')
    parser.add_argument('--img_path', type=str, default='/root/ros_ws/src/OrbbecSDK_ROS1/scripts/data/Color/st.jpg', help='image path')

    args = parser.parse_args()

    # Initialize the RKNN model
    model, platform = setup_model(args)

    # Ensure the results directory exists
    result_dir = '/root/ros_ws/src/OrbbecSDK_ROS1/scripts/results'
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    # Validate and read the input image
    img_name = os.path.basename(args.img_path)
    if not img_check(img_name):
        print(f"{img_name} is not a supported image type")
        sys.exit(1)

    if not os.path.exists(args.img_path):
        print(f"{img_name} is not found")
        sys.exit(1)

    img_src = cv2.imread(args.img_path)
    if img_src is None:
        print(f"Failed to load image {img_name}")
        sys.exit(1)

    print(f"Original image shape: {img_src.shape}")

    # Preprocess: letterbox to the model input size, then BGR -> RGB
    co_helper = COCO_test_helper(enable_letter_box=True)
    img = co_helper.letter_box(im=img_src.copy(), new_shape=(IMG_SIZE[1], IMG_SIZE[0]), pad_color=(114, 114, 114))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    input_data = img

    # Run inference and decode the raw outputs
    print(f'Inferring on {img_name}')
    outputs = model.run([input_data])
    print("Model outputs:", [output.shape for output in outputs])
    boxes, classes, scores, seg_img = post_process(outputs)

    # Map results back to the original (pre-letterbox) image coordinates
    if boxes is not None:
        real_boxs = co_helper.get_real_box(boxes)
        real_segs = co_helper.get_real_seg(seg_img)
        img_p = merge_seg(img_src.copy(), real_segs, classes)

        print(f'\nIMG: {img_name}')
        draw(img_p, real_boxs, scores, classes)

        if args.img_save:
            result_path = os.path.join(result_dir, img_name)
            cv2.imwrite(result_path, img_p)
            print(f'The segmentation results have been saved to {result_path} with shape {img_p.shape}')

            # Also dump each binary mask as its own grayscale PNG
            for i in range(len(real_segs)):
                mask = (real_segs[i] * 255).astype(np.uint8)
                mask_path = os.path.join(result_dir, f'mask_{img_name}_{i}.png')
                cv2.imwrite(mask_path, mask)
                print(f'Mask saved to {mask_path} with shape {mask.shape}')

        if args.img_show:
            cv2.imshow("full post process result", img_p)
            cv2.waitKeyEx(0)

    else:
        print(f'No detections for {img_name}')

    # Release the model resources
    model.release()