import os
import ujson
from libs.PipeLine import ScopedTiming
from media.sensor import *
from media.display import *
from media.media import *
import nncase_runtime as nn
import ulab.numpy as np
import image
import gc

# Display selection: "lcd" takes the NT35516 panel init path below;
# anything else takes the LT9611 HDMI path.
display_mode="lcd"
DISPLAY_WIDTH = 960
DISPLAY_HEIGHT = 540

# AI-input frame size (sensor channel 2). Width is aligned up to a 16-pixel
# boundary; ALIGN_UP presumably comes from one of the star imports above —
# TODO confirm.
OUT_RGB888P_WIDTH = ALIGN_UP(1280, 16)
OUT_RGB888P_HEIGH = 720

# Deployment bundle location on the SD card.
root_path="/sdcard/mp_deployment_source/"
config_path=root_path+"deploy_config.json"
deploy_conf={}  # NOTE: shadowed by a local of the same name inside classification()
debug_mode=1  # >0 enables ScopedTiming profiling prints

def read_deploy_config(config_path):
    """Load and parse the deployment JSON config at *config_path*.

    Returns the parsed dict, or None when the file contains invalid JSON.
    (The original left `config` unbound on a parse error, so the `return`
    line raised a confusing NameError instead.)
    """
    config = None
    with open(config_path, 'r') as json_file:
        try:
            # Load the JSON data from the file.
            config = ujson.load(json_file)
        except ValueError as e:
            # ujson raises ValueError on malformed JSON.
            print("JSON 解析错误:", e)
    return config

# 任务后处理
def softmax(x):
    """Numerically stable softmax: shift by the max before exponentiating."""
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    total = np.sum(exps)
    return exps / total

def sigmoid(x):
    """Logistic function: map a logit to a (0, 1) probability."""
    denominator = 1 + np.exp(-x)
    return 1 / denominator

def align_to_8(x):
    """Round *x* down to the nearest multiple of 8."""
    return x - (x % 8)

def classification():
    """Camera → ai2d preprocess → kmodel inference → OSD display loop.

    Reads the deployment config, configures the sensor (channel 0 feeds the
    display, channel 2 feeds the model), then loops forever: grab a frame,
    resize it with ai2d, run the KPU, post-process, and draw results on the
    OSD layer. Hardware/media resources are released in the `finally` block.

    Returns 0 (unreachable in practice — the loop only exits via exception).
    """
    print("start")
    # Deployment settings from the JSON config file.
    deploy_conf = read_deploy_config(config_path)
    kmodel_name = deploy_conf["kmodel_path"]
    labels = deploy_conf["categories"]
    confidence_threshold = deploy_conf["confidence_threshold"]
    nms_threshold = deploy_conf["nms_threshold"]    # NMS threshold (detection path)
    model_input_size = deploy_conf["img_size"]      # [width, height] the model expects
    num_classes = deploy_conf["num_classes"]
    anchors = deploy_conf["anchors"]                # anchor table (detection path)

    cls_idx = -1
    score = 0.0
    # Initialize the KPU and load the model.
    # (The original created the kpu and loaded the kmodel twice; once suffices.)
    kpu = nn.kpu()
    kpu.load_kmodel(root_path + kmodel_name)
    # ai2d handles preprocessing: resize the RGB888P frame to the model input size.
    ai2d = nn.ai2d()
    ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8)
    ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel)
    ai2d_builder = ai2d.build([1, 3, OUT_RGB888P_HEIGH, OUT_RGB888P_WIDTH],
                              [1, 3, model_input_size[1], model_input_size[0]])
    # Sensor setup.
    sensor = Sensor()
    sensor.reset()
    sensor.set_hmirror(False)  # no horizontal mirror
    sensor.set_vflip(False)    # no vertical flip
    # Channel 0 goes straight to the display (VO) as YUV420.
    sensor.set_framesize(width=DISPLAY_WIDTH, height=align_to_8(DISPLAY_HEIGHT))
    sensor.set_pixformat(PIXEL_FORMAT_YUV_SEMIPLANAR_420)
    # Channel 2 feeds the AI pipeline as planar RGB888.
    sensor.set_framesize(width=OUT_RGB888P_WIDTH, height=OUT_RGB888P_HEIGH, chn=CAM_CHN_ID_2)
    sensor.set_pixformat(PIXEL_FORMAT_RGB_888_PLANAR, chn=CAM_CHN_ID_2)
    # Bind channel-0 output to a video layer of the display.
    sensor_bind_info = sensor.bind_info(x=0, y=0, chn=CAM_CHN_ID_0)
    Display.bind_layer(**sensor_bind_info, layer=Display.LAYER_VIDEO1)
    if display_mode == "lcd":
        # LCD panel path (default 960x540).
        Display.init(Display.NT35516, to_ide=True)
    else:
        # HDMI path via LT9611 (default 1920x1080).
        Display.init(Display.LT9611, to_ide=True)

    # OSD overlay image for text/boxes.
    osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888)
    # Initialize media buffers, then start streaming.
    MediaManager.init()
    sensor.run()

    rgb888p_img = None
    ai2d_input_tensor = None
    # Pre-allocated output tensor reused by every ai2d run.
    data = np.ones((1, 3, model_input_size[1], model_input_size[0]), dtype=np.uint8)
    ai2d_output_tensor = nn.from_numpy(data)
    if len(labels) != num_classes:
        raise ValueError(f"标签数量({len(labels)})与分类数量({num_classes})不匹配")
    try:
        while True:
            with ScopedTiming("total", debug_mode > 0):
                rgb888p_img = sensor.snapshot(chn=CAM_CHN_ID_2)
                if rgb888p_img.format() == image.RGBP888:
                    ai2d_input = rgb888p_img.to_numpy_ref()
                    ai2d_input_tensor = nn.from_numpy(ai2d_input)
                    # Preprocess (resize) into the reusable output tensor.
                    ai2d_builder.run(ai2d_input_tensor, ai2d_output_tensor)
                    # Inference.
                    kpu.set_input_tensor(0, ai2d_output_tensor)
                    kpu.run()
                    # Collect every model output as a numpy array.
                    results = []
                    for i in range(kpu.outputs_size()):
                        output_data = kpu.get_output_tensor(i)
                        result = output_data.to_numpy()
                        print(f"输出[{i}]形状: {result.shape}")
                        del output_data
                        results.append(result)

                    # Detection-style post-processing.
                    # NOTE(review): this assumes detection-head outputs (boxes +
                    # per-anchor class scores). For a plain classifier the
                    # classification branch below is the meaningful one —
                    # confirm which model this config actually deploys.
                    detections = process_detection_results(
                        results, anchors, num_classes,
                        confidence_threshold, nms_threshold
                    )

                    osd_img.clear()
                    for det in detections:
                        x1, y1, x2, y2, cls_idx, score = det
                        if 0 <= cls_idx < len(labels):
                            label = labels[cls_idx]
                            osd_img.draw_rectangle(x1, y1, x2 - x1, y2 - y1, color=(0, 255, 0), thickness=2)
                            osd_img.draw_string_advanced(x1, y1 - 15, 24,
                                f"{label}: {score:.2f}", color=(0, 255, 0))
                        else:
                            print(f"警告：无效类别索引 {cls_idx}")

                    # Classification-style post-processing.
                    if num_classes > 2:
                        # Multi-class: softmax over logits; keep the top class
                        # only when it clears the confidence threshold.
                        softmax_res = softmax(results[0][0])
                        cls_idx = np.argmax(softmax_res)
                        print(f"num_classes: {num_classes}, len(labels): {len(labels)}, cls_idx: {cls_idx}")
                        if softmax_res[cls_idx] > confidence_threshold:
                            score = softmax_res[cls_idx]
                            print("classification result:")
                            print(labels[cls_idx])
                            print("score", score)
                        else:
                            cls_idx = -1
                            score = 0.0
                    else:
                        # Binary: sigmoid on the single logit.
                        sigmoid_res = sigmoid(results[0][0][0])
                        if sigmoid_res > confidence_threshold:
                            cls_idx = 1
                            score = sigmoid_res
                            print("classification result:")
                            print(labels[1])
                            print("score", score)
                        else:
                            cls_idx = 0
                            score = 1 - sigmoid_res
                            print("classification result:")
                            print(labels[0])
                            print("score", score)
                # NOTE(review): this clear wipes the detection rectangles drawn
                # above, so only the classification text below survives. Kept
                # as in the original — likely only one of the two
                # post-processing paths is actually intended.
                osd_img.clear()
                if cls_idx >= 0:
                    osd_img.draw_string_advanced(5, 5, 32, "result:" + labels[cls_idx] + " score:" + str(round(score, 3)), color=(0, 255, 0))
                Display.show_image(osd_img, 0, 0, Display.LAYER_OSD3)
                rgb888p_img = None
                gc.collect()  # reclaim per-frame allocations
    finally:
        # Teardown. In the original this code sat after `while True` and was
        # unreachable; running it in `finally` releases the hardware even on
        # KeyboardInterrupt or an exception inside the loop.
        import time  # local import: `time` is not imported at module level
        del ai2d_input_tensor
        del ai2d_output_tensor
        # Stop camera output.
        sensor.stop()
        # De-initialize the display device.
        Display.deinit()
        # Release media buffers.
        MediaManager.deinit()
        gc.collect()
        time.sleep(1)
        nn.shrink_memory_pool()
        print("end")
    return 0

# 新增：目标检测后处理函数
def process_detection_results(outputs, anchors, num_classes, conf_thres, nms_thres, model_input_size=None):
    """Decode AnchorBaseDet-style outputs into [x1, y1, x2, y2, cls, score] boxes.

    Args:
        outputs: model output arrays; outputs[0] is assumed to hold box
            offsets and outputs[1] per-anchor class scores — TODO confirm
            against the actual model's output layout.
        anchors: anchor table from the deployment config.
        num_classes: number of classes (unused here; kept for interface
            compatibility with the caller).
        conf_thres: minimum class probability to keep a prediction.
        nms_thres: IoU threshold for non-maximum suppression.
        model_input_size: optional (width, height) used to clamp box
            coordinates. BUGFIX: the original read a module-global
            `model_input_size` that does not exist (it is a local of
            `classification()`), raising NameError whenever a box passed the
            confidence filter. When None, no upper clamp is applied.

    Returns:
        List of detections surviving NMS, each [x1, y1, x2, y2, cls_idx, score].
    """
    boxes_output = outputs[0]
    scores_output = outputs[1]

    detections = []

    # Iterate all predictions (simplified; adjust to the real output structure).
    for i in range(boxes_output.shape[0]):
        for j in range(boxes_output.shape[1]):
            box_offset = boxes_output[i, j]
            class_scores = scores_output[i, j]

            # Softmax over class scores, then take the best class.
            class_probs = softmax(class_scores)
            cls_idx = np.argmax(class_probs)
            score = class_probs[cls_idx]

            # Drop low-confidence predictions.
            if score > conf_thres:
                # NOTE(review): this anchor indexing is carried over from the
                # original and looks suspect — verify it matches the model's
                # anchor layout.
                anchor = anchors[i // len(anchors)][j % len(anchors[0]) // 2 * 2: (j % len(anchors[0]) // 2 + 1) * 2]
                x, y, w, h = apply_box_offset(anchor, box_offset)

                # Center/size → top-left and bottom-right corners.
                x1 = max(0, int(x - w / 2))
                y1 = max(0, int(y - h / 2))
                x2 = int(x + w / 2)
                y2 = int(y + h / 2)
                if model_input_size is not None:
                    x2 = min(model_input_size[0], x2)
                    y2 = min(model_input_size[1], y2)

                detections.append([x1, y1, x2, y2, cls_idx, score])

    # Filter overlapping boxes with non-maximum suppression.
    detections = apply_nms(detections, nms_thres)

    return detections

# 新增：边界框偏移应用函数（示例）
def apply_box_offset(anchor, offset):
    """Decode a box (center x, center y, width, height) from an anchor.

    The offsets scale relative to the anchor size: dx/dy translate the
    anchor's center, while dw/dh scale its width/height exponentially.
    """
    anchor_w, anchor_h = anchor
    dx, dy, dw, dh = offset

    center_x = anchor_w / 2 + dx * anchor_w
    center_y = anchor_h / 2 + dy * anchor_h
    box_w = anchor_w * np.exp(dw)
    box_h = anchor_h * np.exp(dh)

    return center_x, center_y, box_w, box_h

# 新增：非极大值抑制函数（示例）
def apply_nms(detections, nms_threshold):
    """Greedy non-maximum suppression.

    Repeatedly keep the highest-scoring detection and discard every remaining
    detection whose IoU with it reaches *nms_threshold*.
    """
    if not detections:
        return []

    # Highest confidence first (score is at index 5).
    remaining = sorted(detections, key=lambda d: d[5], reverse=True)
    kept = []

    while remaining:
        top = remaining[0]
        kept.append(top)

        # Keep only candidates that do not overlap the chosen box too much.
        survivors = []
        for cand in remaining[1:]:
            if calculate_iou(top, cand) < nms_threshold:
                survivors.append(cand)
        remaining = survivors

    return kept

# 新增：计算IoU函数
def calculate_iou(box1, box2):
    """Intersection-over-union of two detections.

    Each detection is [x1, y1, x2, y2, cls, score]; only the four corner
    coordinates are used.
    """
    a_x1, a_y1, a_x2, a_y2, _, _ = box1
    b_x1, b_y1, b_x2, b_y2, _, _ = box2

    # Overlap rectangle, clamped to zero extent for disjoint boxes.
    overlap_w = max(0, min(a_x2, b_x2) - max(a_x1, b_x1))
    overlap_h = max(0, min(a_y2, b_y2) - max(a_y1, b_y1))
    intersection = overlap_w * overlap_h

    union = ((a_x2 - a_x1) * (a_y2 - a_y1)
             + (b_x2 - b_x1) * (b_y2 - b_y1)
             - intersection)

    # The max() floor guards against division by zero for degenerate boxes.
    return intersection / max(1e-6, union)
# Script entry point: run the capture/inference loop when executed directly.
if __name__=="__main__":
    classification()

