import base64
import io

import numpy as np
from flask import request
from PIL import Image

from controllers.base_controller import BaseController
from image_processing_modules.baidu_ocr import BaiduOCR
from image_processing_modules.clip_model import ChineseClipModelWrapper
from utils.api_response_utils import create_error_response, create_success_response
from configs.global_config import global_config
from utils.logger_config import get_logger
from utils.numpy_utils import calculate_similarity

logger = get_logger(__name__)


class ImageProcessingController(BaseController):
    """图像处理控制器，提供基于百度OCR的文字识别功能
 
    Also exposes CLIP-based endpoints for image/text feature extraction and
    feature-vector similarity comparison.
    """

    def __init__(self, import_name):
        """
        Initialize the controller and register it under /image_processing.

        Args:
            import_name: Flask blueprint import name.
        """
        super().__init__(
            "图片处理", import_name, url_prefix="/image_processing"
        )
        self.baidu_ocr = BaiduOCR()
        # Lazily initialized via the `clip_model` property: loading the model
        # is slow and may fail, so it is deferred until first use.
        self._clip_model = None

    @property
    def clip_model(self):
        """Lazily construct and cache the CLIP model on first access.

        Tries the configured primary model first; on failure falls back to
        ``clip.fallback_model``.

        Returns:
            The initialized ChineseClipModelWrapper instance.

        Raises:
            RuntimeError: if neither the primary nor the fallback model
                can be initialized.
        """
        if self._clip_model is not None:
            return self._clip_model

        # Read configuration up front (outside the try) so the fallback path
        # below can always reuse these values, even if the primary
        # initialization raises early.
        model_name = global_config.get("clip.model_name", "OFA-Sys/chinese-clip-vit-base-patch16")
        local_model_path = global_config.get("clip.local_model_path", "")
        offline_mode = global_config.get("clip.offline_mode", True)
        device_config = global_config.get("clip.device", "auto")
        # "auto" lets the wrapper class pick the device itself.
        device = None if device_config == "auto" else device_config

        try:
            logger.info(f"初始化CLIP模型: {model_name}, 离线模式: {offline_mode}")

            # The actual, expensive model initialization happens here.
            self._clip_model = ChineseClipModelWrapper(
                model_name=model_name,
                device=device,
                local_model_path=local_model_path if local_model_path else None,
                offline_mode=offline_mode,
            )

            logger.info("CLIP模型初始化成功")

        except Exception as e:
            error_msg = f"初始化CLIP模型失败: {str(e)}"
            logger.error(error_msg)

            # Try the configured fallback model before giving up entirely.
            try:
                fallback_model = global_config.get("clip.fallback_model", "openai/clip-vit-base-patch32")
                logger.warning(f"尝试使用备用模型: {fallback_model}")

                self._clip_model = ChineseClipModelWrapper(
                    model_name=fallback_model,
                    device=device,
                    local_model_path=local_model_path if local_model_path else None,
                    offline_mode=offline_mode,
                )

                logger.info(f"备用CLIP模型初始化成功: {fallback_model}")

            except Exception as fallback_error:
                logger.error(f"备用模型也初始化失败: {str(fallback_error)}")
                # Leave unset so a later access raises a clear error again
                # (and may retry if the environment has been fixed).
                self._clip_model = None
                raise RuntimeError(
                    f"无法初始化任何CLIP模型。主模型错误: {error_msg}，备用模型错误: {str(fallback_error)}"
                ) from fallback_error

        return self._clip_model

    def _ensure_clip_model(self):
        """Trigger lazy CLIP initialization and report unavailability.

        Returns:
            None when the model is (or becomes) available, otherwise a
            ready-to-return 503 error response.
        """
        if self._clip_model is None:
            try:
                # Property access performs the actual initialization.
                _ = self.clip_model
            except Exception as model_error:
                logger.error(f"CLIP模型初始化失败: {str(model_error)}")
                return create_error_response(
                    503,
                    "CLIP模型服务不可用，请检查模型配置或网络连接。如果使用离线模式，请确保本地模型文件存在。"
                )
        return None

    @staticmethod
    def _ocr_error_response(result):
        """Map a failed/empty OCR result to a 500 error response, or None.

        Handles the empty-result case explicitly: previously ``result.get``
        was called on a falsy result, raising AttributeError on None.
        """
        if not result:
            return create_error_response(500, "OCR识别失败: 服务未返回结果")
        if "error_code" in result:
            error_msg = result.get("error_msg", "未知错误")
            return create_error_response(500, f"OCR识别失败: {error_msg}")
        return None

    def register_routes(self):
        """Register all HTTP routes exposed by this controller."""

        @self.route("/recognize_text_from_base64", methods=["POST"])
        @self.validate_json_request()
        def api_recognize_text_from_base64():
            """从Base64编码的图像中识别文字
            接收Base64编码的图像字符串，使用百度OCR服务进行文字识别。
            ---
            tags:
              - 图片处理
            parameters:
              - in: body
                name: body
                description: 请求体
                required: true
                schema:
                  type: object
                  required:
                    - image_base64
                  properties:
                    image_base64:
                      type: string
                      description: 图像的Base64编码字符串。
                      example: "/9j/4AAQSkZJRgABAQEAAAAAAAD/4..."
            responses:
              200:
                description: 文字识别成功
                schema:
                  $ref: "#/definitions/SuccessResponse"
              400:
                description: 无效的请求参数
                schema:
                  $ref: "#/definitions/ErrorResponse"
              500:
                description: 服务器内部错误
                schema:
                  $ref: "#/definitions/ErrorResponse"
            """
            try:
                data = request.parsed_data
                image_base64 = data.get("image_base64")

                if not image_base64 or not isinstance(image_base64, str):
                    return create_error_response(
                        400, "参数'image_base64'是必需的，且必须是字符串"
                    )

                result = self.baidu_ocr.recognize_text(image_base64, is_path=False)

                # Empty results and Baidu error payloads both map to 500.
                error_response = self._ocr_error_response(result)
                if error_response is not None:
                    return error_response

                return create_success_response("文字识别成功", result)

            except Exception as e:
                return create_error_response(500, f"处理请求时出错: {str(e)}")

        @self.route("/recognize_text_from_url", methods=["POST"])
        @self.validate_json_request()
        def api_recognize_text_from_url():
            """从图像URL中识别文字
            接收一个公开可访问的图像URL，使用百度OCR服务进行文字识别。
            ---
            tags:
              - 图片处理
            parameters:
              - in: body
                name: body
                description: 请求体
                required: true
                schema:
                  type: object
                  required:
                    - image_url
                  properties:
                    image_url:
                      type: string
                      description: 图像的URL地址。
                      example: "https://www.baidu.com/img/PCtm_d9c8750bed0b3c7d089fa7d55720d6cf.png"
            responses:
              200:
                description: 文字识别成功
                schema:
                  $ref: "#/definitions/SuccessResponse"
              400:
                description: 无效的请求参数
                schema:
                  $ref: "#/definitions/ErrorResponse"
              500:
                description: 服务器内部错误
                schema:
                  $ref: "#/definitions/ErrorResponse"
            """
            try:
                data = request.parsed_data
                image_url = data.get("image_url")

                if not image_url or not isinstance(image_url, str):
                    return create_error_response(
                        400, "参数'image_url'是必需的，且必须是字符串"
                    )

                result = self.baidu_ocr.recognize_text_from_url(image_url)

                error_response = self._ocr_error_response(result)
                if error_response is not None:
                    return error_response

                return create_success_response("文字识别成功", result)

            except Exception as e:
                return create_error_response(500, f"处理请求时出错: {str(e)}")

        @self.route("/embedding/image", methods=["POST"])
        @self.validate_json_request()
        def api_get_image_embedding():
            """提取图像的CLIP特征向量
            接收Base64编码的图像，返回其对应的CLIP模型特征向量。
            ---
            tags:
              - 图片处理
            parameters:
              - in: body
                name: body
                description: 请求体
                required: true
                schema:
                  type: object
                  required:
                    - image_base64
                  properties:
                    image_base64:
                      type: string
                      description: 图像的Base64编码字符串。
                      example: "/9j/4AAQSkZJRgABAQEAAAAAAAD/4..."
            responses:
              200:
                description: 图像特征提取成功
                schema:
                  type: object
                  properties:
                    code:
                      type: integer
                      example: 200
                    message:
                      type: string
                      example: "图像特征提取成功"
                    data:
                      type: object
                      properties:
                        embedding:
                          type: array
                          items:
                            type: number
                            format: float
                          example: [0.01, 0.02, ..., 0.99]
              400:
                description: 无效的请求参数
                schema:
                  $ref: "#/definitions/ErrorResponse"
              500:
                description: 服务器内部错误
                schema:
                  $ref: "#/definitions/ErrorResponse"
            """
            try:
                data = request.parsed_data
                base64_image = data.get("image_base64")

                if not base64_image or not isinstance(base64_image, str):
                    return create_error_response(400, "参数'image_base64'是必需的")

                # 503 if the CLIP model cannot be initialized.
                unavailable = self._ensure_clip_model()
                if unavailable is not None:
                    return unavailable

                # Decode Base64 into an in-memory RGB image. A malformed
                # payload is a client error (400), not a server error.
                try:
                    image_bytes = base64.b64decode(base64_image)
                    image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
                except Exception as decode_error:
                    return create_error_response(
                        400, f"无效的Base64图像数据: {str(decode_error)}"
                    )

                # Use the model wrapper's shared processing pipeline for
                # image features. NOTE(review): relies on the wrapper's
                # private `_process_and_embed`; consider exposing a public
                # `image_to_embedding` on the wrapper instead.
                embedding = self.clip_model._process_and_embed(
                    processor_input={"images": image},
                    feature_extractor=self.clip_model.model.get_image_features,
                ).squeeze()

                # Convert the numpy array to a list for JSON serialization.
                embedding_list = embedding.tolist()

                return create_success_response(
                    "图像特征提取成功", {"embedding": embedding_list}
                )

            except Exception as e:
                logger.error(f"提取图像特征时出错: {str(e)}")
                return create_error_response(500, f"提取图像特征时出错: {str(e)}")

        @self.route("/embedding/text", methods=["POST"])
        @self.validate_json_request()
        def api_get_text_embedding():
            """提取文本的CLIP特征向量
            接收文本字符串，返回其对应的CLIP模型特征向量。
            ---
            tags:
              - 图片处理
            parameters:
              - in: body
                name: body
                description: 请求体
                required: true
                schema:
                  type: object
                  required:
                    - text
                  properties:
                    text:
                      type: string
                      description: 需要提取特征的文本。
                      example: "一只猫坐在垫子上"
            responses:
              200:
                description: 文本特征提取成功
                schema:
                  type: object
                  properties:
                    code:
                      type: integer
                      example: 200
                    message:
                      type: string
                      example: "文本特征提取成功"
                    data:
                      type: object
                      properties:
                        embedding:
                          type: array
                          items:
                            type: number
                            format: float
                          example: [0.01, 0.02, ..., 0.99]
              400:
                description: 无效的请求参数
                schema:
                  $ref: "#/definitions/ErrorResponse"
              500:
                description: 服务器内部错误
                schema:
                  $ref: "#/definitions/ErrorResponse"
            """
            try:
                data = request.parsed_data
                text = data.get("text")

                if not text or not isinstance(text, str):
                    return create_error_response(400, "参数'text'是必需的")

                # 503 if the CLIP model cannot be initialized.
                unavailable = self._ensure_clip_model()
                if unavailable is not None:
                    return unavailable

                embedding = self.clip_model.text_to_embedding(text)
                embedding_list = embedding.tolist()

                return create_success_response(
                    "文本特征提取成功", {"embedding": embedding_list}
                )

            except Exception as e:
                logger.error(f"提取文本特征时出错: {str(e)}")
                return create_error_response(500, f"提取文本特征时出错: {str(e)}")

        @self.route("/compare_features", methods=["POST"])
        @self.validate_json_request()
        def api_compare_features():
            """比对两个特征向量的相似度
            接收两个特征向量和相似度阈值，计算它们之间的相似度并返回结果。
            ---
            tags:
              - 图片处理
            parameters:
              - in: body
                name: body
                description: 请求体
                required: true
                schema:
                  type: object
                  required:
                    - similarity_threshold
                    - source_feature
                    - target_feature
                  properties:
                    similarity_threshold:
                      type: number
                      format: float
                      description: 相似度阈值
                      example: 0.6
                    source_feature:
                      type: array
                      items:
                        type: number
                        format: float
                      description: 源特征向量
                      example: [0.24395322799682617, -0.4605165123939514, 0.4141506850719452]
                    target_feature:
                      type: array
                      items:
                        type: number
                        format: float
                      description: 目标特征向量
                      example: [-0.8679306507110596, -0.3318841755390167, -0.9373432993888855]
            responses:
              200:
                description: 特征比对成功
                schema:
                  type: object
                  properties:
                    code:
                      type: integer
                      example: 200
                    message:
                      type: string
                      example: "特征相似度: 23.38%（阈值: 60%）"
                    data:
                      type: object
                      properties:
                        match_rate:
                          type: number
                          format: float
                          example: 23.378930582487783
                        similarity:
                          type: number
                          format: float
                          example: 0.23378930582487784
                        threshold:
                          type: number
                          format: float
                          example: 0.6
              400:
                description: 无效的请求参数
                schema:
                  $ref: "#/definitions/ErrorResponse"
              500:
                description: 服务器内部错误
                schema:
                  $ref: "#/definitions/ErrorResponse"
            """
            try:
                data = request.parsed_data
                similarity_threshold = data.get("similarity_threshold")
                source_feature = data.get("source_feature")
                target_feature = data.get("target_feature")

                # Parameter validation. (bool is a subclass of int and is
                # intentionally still accepted for backward compatibility.)
                if similarity_threshold is None or not isinstance(similarity_threshold, (int, float)):
                    return create_error_response(400, "参数'similarity_threshold'是必需的，且必须是数字")

                if not source_feature or not isinstance(source_feature, list):
                    return create_error_response(400, "参数'source_feature'是必需的，且必须是数组")

                if not target_feature or not isinstance(target_feature, list):
                    return create_error_response(400, "参数'target_feature'是必需的，且必须是数组")

                if len(source_feature) != len(target_feature):
                    return create_error_response(400, "源特征向量和目标特征向量的维度必须相同")

                # Convert to numpy arrays for the similarity computation.
                source_embedding = np.array(source_feature, dtype=np.float32)
                target_embedding = np.array(target_feature, dtype=np.float32)

                # Cosine similarity; cast to a plain Python float so the
                # response is JSON-serializable even if the helper returns a
                # numpy scalar (np.float32 is not JSON-serializable).
                similarity = float(
                    calculate_similarity(source_embedding, target_embedding, metric="cosine")
                )

                # Clamp to the valid cosine range to guard against float drift.
                similarity = max(-1.0, min(1.0, similarity))

                # Express as a percentage for the human-readable fields.
                match_rate = similarity * 100

                response_data = {
                    "match_rate": match_rate,
                    "similarity": similarity,
                    "threshold": similarity_threshold
                }

                message = f"特征相似度: {match_rate:.2f}%（阈值: {similarity_threshold*100:.0f}%）"

                return create_success_response(message, response_data)

            except Exception as e:
                logger.error(f"特征比对时出错: {str(e)}")
                return create_error_response(500, f"特征比对时出错: {str(e)}")