"""OCR引擎模块"""

import base64
import logging
import os
import re

import dashscope
from dashscope import MultiModalConversation, Generation

from config.settings import DASHSCOPE_API_KEY


# Simplified logger setup to avoid import problems when packaging
def setup_logger(name):
    """Create and return a stream logger configured at INFO level.

    The stream handler is attached only on the first call for a given
    name, so repeated calls never duplicate handlers.

    Args:
        name (str): Name of the logger.

    Returns:
        logging.Logger: The configured logger instance.
    """
    log = logging.getLogger(name)
    log.setLevel(logging.INFO)

    # Already configured on a previous call — reuse as-is.
    if log.handlers:
        return log

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    )
    log.addHandler(stream_handler)
    return log


# Module-level logger shared by everything in this file.
logger = setup_logger(__name__)


class ScreenshotOCR:
    """Screenshot OCR recognition backed by DashScope (Tongyi) models.

    Uses the ``qwen-vl-ocr`` multimodal model for image text/digit
    recognition and ``qwen-plus`` for translation.  All public methods
    return plain strings; API failures are reported as human-readable
    error strings rather than raised, so callers can display them
    directly.
    """

    def __init__(self):
        """Create required directories and configure the DashScope API key.

        Raises:
            ValueError: If ``DASHSCOPE_API_KEY`` is not configured.
        """
        self.setup_directories()
        # Configure the API key (DashScope also accepts Yunxiao keys
        # starting with "pt").
        if DASHSCOPE_API_KEY:
            dashscope.api_key = DASHSCOPE_API_KEY
        else:
            raise ValueError("未设置API Key，请检查配置")

    def setup_directories(self):
        """Create the directories this class relies on (currently LOG_DIR)."""
        # Imported lazily to avoid import-order problems when packaging.
        from config.settings import LOG_DIR
        for directory in [LOG_DIR]:
            # exist_ok=True avoids the race between checking for the
            # directory and creating it.
            os.makedirs(directory, exist_ok=True)

    def image_to_base64(self, image):
        """Encode a PIL image as a base64 PNG string.

        Args:
            image (PIL.Image): Image to convert.

        Returns:
            str: Base64-encoded PNG bytes of the image.
        """
        import io
        buffered = io.BytesIO()
        image.save(buffered, format="PNG")
        return base64.b64encode(buffered.getvalue()).decode('utf-8')

    def recognize_with_tongyi(self, image, mode='auto'):
        """Recognize image content with the qwen-vl-ocr model.

        Args:
            image (PIL.Image): Image to recognize.
            mode (str): Recognition mode ('digits', 'text', 'auto').

        Returns:
            str: Recognized text, or an error string on failure.
        """
        try:
            # Guard: the API key must have been configured.
            if not dashscope.api_key:
                logger.error("API Key未设置")
                return "错误：API Key未设置"

            img_base64 = self.image_to_base64(image)

            # Build the multimodal message: the image plus a prompt
            # tailored to the requested mode.
            messages = [
                {
                    "role": "user",
                    "content": [
                        {
                            "image": f"data:image/png;base64,{img_base64}"
                        },
                        {
                            "text": self._get_prompt_by_mode(mode)
                        }
                    ]
                }
            ]

            response = MultiModalConversation.call(
                model='qwen-vl-ocr',
                messages=messages
            )

            if response.status_code == 200:
                result_text = self._extract_text_from_response(response, mode)
                logger.info(f"通义灵码OCR识别结果: {result_text}")
                return result_text

            error_msg = f"OCR识别请求失败: {response.status_code}, {response.code}, {response.message}"
            logger.error(error_msg)
            return f"错误：{response.message}"

        except Exception as e:
            # Deliberate best-effort boundary: report the failure as a
            # string so the caller/UI can show it.
            error_msg = f"通义灵码OCR识别时出错: {e}"
            logger.error(error_msg)
            return f"错误：{e}"

    def _get_prompt_by_mode(self, mode):
        """Return the model prompt for the given recognition mode.

        Args:
            mode (str): 'digits', 'text', or anything else for auto.

        Returns:
            str: Prompt text passed to the model.
        """
        if mode == 'digits':
            return "请识别图像中的数字内容，只返回数字，不要有任何其他文字。"
        if mode == 'text':
            return "请识别图像中的文本内容。"
        # auto mode
        return "请识别图像中的所有内容。"

    def _extract_text_from_response(self, response, mode):
        """Extract recognized text from a DashScope API response.

        Args:
            response: DashScope API response object.
            mode (str): Recognition mode; 'digits' keeps only 0-9.

        Returns:
            str: Extracted (stripped) text, or "" if the response does
                not carry the expected structure or extraction fails.
        """
        try:
            output = response.output
            # Guard clauses: any missing piece of the expected structure
            # yields an empty result, matching the API's optional fields.
            if 'choices' not in output:
                return ""
            choices = output['choices']
            if not choices or 'message' not in choices[0]:
                return ""
            message = choices[0]['message']
            if 'content' not in message:
                return ""

            content = message['content']
            if isinstance(content, list):
                # List form: concatenate every text fragment.
                result_text = ''.join(
                    item['text'] for item in content
                    if isinstance(item, dict) and 'text' in item
                )
            elif isinstance(content, str):
                result_text = content
            else:
                result_text = str(content)

            if mode == 'digits':
                # Keep digits only.
                result_text = re.sub(r'[^0-9]', '', result_text)

            return result_text.strip()
        except Exception as e:
            logger.error(f"提取文本时出错: {e}")
            return ""

    def translate_text(self, text, target_language="中文"):
        """Translate text with the qwen-plus model.

        Args:
            text (str): Text to translate.
            target_language (str): Target language name.

        Returns:
            str: Translated text, or an error string on failure.
        """
        try:
            # Guard: the API key must have been configured.
            if not dashscope.api_key:
                logger.error("API Key未设置")
                return "错误：API Key未设置"

            # Nothing to do for empty/blank input.
            if not text or not text.strip():
                return "无内容可翻译"

            messages = [
                {
                    "role": "user",
                    "content": f"请将下面的文本翻译成{target_language}，只需要返回翻译结果，不要添加任何其他内容或解释：\n\n{text}"
                }
            ]

            response = Generation.call(
                model='qwen-plus',
                messages=messages
            )

            logger.info(f"翻译API完整响应: {response}")

            if response.status_code == 200:
                # Preferred path: output.choices[0].message.content.
                if (hasattr(response, 'output') and
                        response.output and
                        hasattr(response.output, 'choices') and
                        response.output.choices):

                    choices = response.output.choices
                    if isinstance(choices, list) and len(choices) > 0:
                        choice = choices[0]
                        if (hasattr(choice, 'message') and
                                choice.message and
                                hasattr(choice.message, 'content')):
                            result_text = choice.message.content
                            if result_text and isinstance(result_text, str):
                                logger.info(f"翻译结果: {result_text}")
                                return result_text.strip()

                # Fallback path: some response formats carry output.text.
                if (hasattr(response, 'output') and
                        response.output and
                        hasattr(response.output, 'text')):
                    result_text = response.output.text
                    if result_text and isinstance(result_text, str):
                        logger.info(f"翻译结果(从text字段): {result_text}")
                        return result_text.strip()

                # Neither extraction path produced a usable result.
                logger.error("无法从响应中提取翻译结果")
                return "翻译失败：无法提取翻译结果"
            else:
                error_msg = f"翻译请求失败: {response.status_code}, {response.code}, {response.message}"
                logger.error(error_msg)
                return f"翻译失败：{response.message}"

        except Exception as e:
            error_msg = f"翻译时出错: {e}"
            logger.error(error_msg)
            return f"翻译失败：{e}"

    def recognize_auto(self, image):
        """Recognize all content in an image (auto mode).

        Args:
            image (PIL.Image): Image to recognize.

        Returns:
            str: Recognition result.
        """
        return self.recognize_with_tongyi(image, 'auto')
