"""
基础模型类
提供模型加载、推理、输入输出处理等功能
"""
import cv2
import numpy as np
import logging
import time
from pathlib import Path

logger = logging.getLogger(__name__)

try:
    from hobot_dnn import pyeasy_dnn as dnn
    HAS_HOBOT_DNN = True
except ImportError:
    logger.warning("hobot_dnn未找到，将使用CPU模拟模式")
    HAS_HOBOT_DNN = False


class BaseModel:
    """
    Base model class for Horizon BPU (RDK-X5) quantized models.

    Wraps model loading, input/output tensor introspection, image
    preprocessing (resize + BGR->NV12), and inference. Falls back to a CPU
    simulation mode when ``hobot_dnn`` is not importable.
    """

    def __init__(self, model_file: str) -> None:
        """
        Initialize the model and load the quantized model file.

        Args:
            model_file (str): Path to the quantized model file.

        Raises:
            FileNotFoundError: If the model file does not exist.
        """
        self.model_file = model_file
        self.quantize_model = None
        # Default input resolution; overwritten from the model's first input
        # tensor shape after a successful load.
        # NOTE: "weight" is a historical misnomer for "width"; the attribute
        # name is kept for backward compatibility with existing callers.
        self.model_input_height = 320
        self.model_input_weight = 320
        # Scale factors from the last resizer() call (original / model size).
        # Initialized here so consumers never hit AttributeError before the
        # first resize.
        self.y_scale = 1.0
        self.x_scale = 1.0

        # Fail fast with a clear error when the model file is missing.
        if not Path(model_file).exists():
            logger.error(f"❌ 模型文件不存在: {model_file}")
            raise FileNotFoundError(f"模型文件不存在: {model_file}")

        if HAS_HOBOT_DNN:
            self._load_hobot_model(model_file)
        else:
            logger.warning("使用CPU模拟模式，实际项目中需要在RDK-X5平台上运行")

    def _load_hobot_model(self, model_file: str) -> None:
        """
        Load a Horizon BPU quantized model and log its tensor properties.

        Args:
            model_file (str): Path to the quantized model file.

        Raises:
            Exception: Re-raises whatever ``dnn.load`` raises on failure.
        """
        try:
            begin_time = time.time()
            self.quantize_model = dnn.load(model_file)
            load_time = time.time() - begin_time
            logger.info(f"✅ 模型加载成功，耗时: {load_time:.2f}秒")

            # Log input tensor info for debugging deployment mismatches.
            logger.info("-> 输入张量信息")
            for i, quantize_input in enumerate(self.quantize_model[0].inputs):
                logger.info(f"input[{i}]: name={quantize_input.name}, "
                           f"type={quantize_input.properties.dtype}, "
                           f"shape={quantize_input.properties.shape}")

            # Log output tensor info.
            logger.info("-> 输出张量信息")
            for i, quantize_output in enumerate(self.quantize_model[0].outputs):
                logger.info(f"output[{i}]: name={quantize_output.name}, "
                           f"type={quantize_output.properties.dtype}, "
                           f"shape={quantize_output.properties.shape}")

            # Record model input height/width from the first input tensor
            # (assumes NCHW layout, i.e. shape[2:4] == (H, W) — consistent
            # with how resizer() consumes these values).
            self.model_input_height, self.model_input_weight = self.quantize_model[0].inputs[0].properties.shape[2:4]

        except Exception as e:
            logger.error(f"❌ 模型加载失败: {e}")
            raise

    def resizer(self, img: np.ndarray) -> np.ndarray:
        """
        Resize an image to the model input size, recording the scale factors
        needed to map results back to the original resolution.

        Args:
            img (np.ndarray): Input image, shape (H, W[, C]).

        Returns:
            np.ndarray: Image resized to (model_input_height, model_input_weight).
        """
        img_h, img_w = img.shape[0:2]
        self.y_scale = img_h / self.model_input_height
        self.x_scale = img_w / self.model_input_weight
        # BUG FIX: cv2.resize takes dsize as (width, height). The original
        # passed (height, width), which silently worked only because the
        # default input is square (320x320) but is wrong for any
        # non-square model input.
        return cv2.resize(
            img,
            (self.model_input_weight, self.model_input_height),
            interpolation=cv2.INTER_NEAREST,
        )

    def bgr2nv12(self, bgr_img: np.ndarray) -> np.ndarray:
        """
        Convert a BGR image to NV12 (semi-planar YUV420) for BPU inference.

        The image is first resized to the model input size, converted to
        planar I420, then the U/V planes are interleaved into the packed UV
        plane that NV12 requires.

        Args:
            bgr_img (np.ndarray): BGR image, shape (H, W, 3).

        Returns:
            np.ndarray: Flat NV12 buffer of length H*W*3/2.
        """
        begin_time = time.time()
        bgr_img = self.resizer(bgr_img)
        height, width = bgr_img.shape[0], bgr_img.shape[1]
        area = height * width
        # I420 layout: Y plane (area), then U plane (area/4), then V plane (area/4).
        yuv420p = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2YUV_I420).reshape((area * 3 // 2,))
        y = yuv420p[:area]
        # Interleave planar U and V into packed UVUV... for NV12.
        uv_planar = yuv420p[area:].reshape((2, area // 4))
        uv_packed = uv_planar.transpose((1, 0)).reshape((area // 2,))
        nv12 = np.zeros_like(yuv420p)
        nv12[:area] = y
        nv12[area:] = uv_packed

        process_time = time.time() - begin_time
        logger.debug(f"BGR转NV12耗时: {process_time*1000:.2f}ms")
        return nv12

    def forward(self, input_tensor: np.ndarray):
        """
        Run model inference.

        Args:
            input_tensor (np.ndarray): Input tensor (e.g. an NV12 buffer).

        Returns:
            List of inference output tensors, or the simulated output when
            running without a loaded BPU model.
        """
        if not HAS_HOBOT_DNN or self.quantize_model is None:
            logger.warning("模型未加载或运行在CPU模拟模式下")
            # CPU simulation fallback for development off-device.
            return self._simulate_inference(input_tensor)

        begin_time = time.time()
        quantize_outputs = self.quantize_model[0].forward(input_tensor)
        inference_time = time.time() - begin_time
        logger.debug(f"模型推理耗时: {inference_time*1000:.2f}ms")
        return quantize_outputs

    def _simulate_inference(self, input_tensor: np.ndarray) -> list:
        """Simulated CPU inference (development/testing only); returns no outputs."""
        time.sleep(0.01)  # mimic inference latency
        return []

    def c2numpy(self, outputs) -> list:
        """
        Convert C-backed inference output tensors to numpy arrays.

        Args:
            outputs: List of inference output tensors (with ``.buffer``).

        Returns:
            list[np.ndarray]: Numpy views of the output buffers; empty list
            when there are no outputs or ``hobot_dnn`` is unavailable.
        """
        if not HAS_HOBOT_DNN or not outputs:
            return []

        begin_time = time.time()
        numpy_outputs = [dnn_tensor.buffer for dnn_tensor in outputs]
        convert_time = time.time() - begin_time
        logger.debug(f"C转numpy耗时: {convert_time*1000:.2f}ms")
        return numpy_outputs