import openai
# import aiohttp  # unused: VLM streaming call has been commented out
import time
import asyncio
import json
from config.settings import config
from typing import Dict, Any, Optional
from utils.logger import (
    emobot_logger,
    log_function_call,
    log_function_result,
    log_function_error,
)
from utils.performance_logger import (
    log_api_call,
    track_step
)


class LLMService:
    """Thin wrapper around an OpenAI-compatible chat-completion API.

    Holds a lazily created client configured from the project ``config`` and
    exposes synchronous and streaming completion helpers, each instrumented
    with performance logging via ``log_api_call``.
    """

    def __init__(self):
        # Credentials / endpoint come from the project configuration.
        self.api_key = config.LLM_API_KEY
        self.base_url = config.LLM_BASE_URL
        # Default to qwen-turbo for faster responses when no model is configured.
        self.model = getattr(config, 'LLM_MODEL', 'qwen-turbo')
        # NOTE: VLM credentials removed — the cloud side no longer performs
        # scene recognition. Restore from version control if needed.
        self.client = None  # created lazily by _get_client()
        self.logger = emobot_logger.get_logger()

    def _get_client(self):
        """Lazily create and cache the OpenAI client.

        Returns:
            The cached ``openai.OpenAI`` client instance.

        Raises:
            ValueError: if ``LLM_API_KEY`` is not configured.
        """
        if self.client is None:
            if not self.api_key:
                raise ValueError("LLM_API_KEY environment variable is required")

            self.client = openai.OpenAI(
                api_key=self.api_key,
                base_url=self.base_url if self.base_url else None,
            )
        return self.client

    @staticmethod
    def _messages_preview(messages):
        """Build a compact, content-free summary of *messages* for logging.

        Only roles and content lengths are recorded, so message bodies never
        leak into the performance log.
        """
        return [
            {"role": msg.get("role"), "content_length": len(msg.get("content", ""))}
            for msg in messages
        ]

    def chat_completion(self, messages, model=None):
        """Generate a chat completion synchronously.

        Args:
            messages: list of ``{"role": ..., "content": ...}`` dicts.
            model: optional model name; defaults to ``self.model``.

        Returns:
            The stripped response text, or ``None`` on any API error
            (errors are logged, never raised to the caller).
        """
        start_time = time.time()

        # Fall back to the configured default model.
        if model is None:
            model = self.model

        request_data = {
            "model": model,
            "messages_count": len(messages),
            "messages_preview": self._messages_preview(messages),
        }

        # Record the start of the call (no response/duration yet).
        log_api_call(
            api_name="QWEN_CHAT_COMPLETION",
            api_type="LLM",
            request_data=request_data,
        )

        try:
            client = self._get_client()
            response = client.chat.completions.create(model=model, messages=messages)
            result = response.choices[0].message.content.strip()

            duration = (time.time() - start_time) * 1000

            # Use the same api_name as the start record so the two log
            # entries correlate (the original mixed QWEN_/OPENAI_ names).
            log_api_call(
                api_name="QWEN_CHAT_COMPLETION",
                api_type="LLM",
                request_data=request_data,
                response_data={
                    "response_length": len(result),
                    "response_preview": result[:100] + "..." if len(result) > 100 else result,
                },
                duration_ms=duration,
            )

            return result
        except Exception as e:
            duration = (time.time() - start_time) * 1000
            # Log through the service logger instead of print().
            self.logger.error(f"Error in LLM chat completion: {e}")

            log_api_call(
                api_name="QWEN_CHAT_COMPLETION",
                api_type="LLM",
                request_data=request_data,
                duration_ms=duration,
                error=str(e),
            )

            return None

    # NOTE: the former call_vlm_model / call_vlm_model_streaming methods
    # (LLM-based stand-ins for a VLM) were removed — the cloud side no longer
    # performs scene recognition. Restore from version control if scene
    # recognition ever returns to the cloud side.

    async def chat_completion_streaming(self, messages, model=None):
        """Yield chat-completion text chunks as they arrive from the API.

        Args:
            messages: list of ``{"role": ..., "content": ...}`` dicts.
            model: optional model name; defaults to ``self.model``.

        Yields:
            str: successive content deltas; on error, a single apology
            message (in Chinese) containing the error text.
        """
        start_time = time.time()

        # Fall back to the configured default model.
        if model is None:
            model = self.model

        request_data = {
            "model": model,
            "messages_count": len(messages),
            "messages_preview": self._messages_preview(messages),
        }

        log_function_call("chat_completion_streaming", request_data)

        try:
            client = self._get_client()

            # Record the start of the call (no response/duration yet).
            log_api_call(
                api_name="QWEN_CHAT_COMPLETION",
                api_type="LLM",
                request_data=request_data,
            )

            # NOTE(review): this iterates the *synchronous* OpenAI stream
            # inside an async generator — each network read blocks the event
            # loop. Consider openai.AsyncOpenAI if this becomes a bottleneck.
            stream = client.chat.completions.create(
                model=model,
                messages=messages,
                stream=True,
                temperature=0.7,
                max_tokens=1000,
            )

            # Relay content deltas to the caller as they arrive.
            for chunk in stream:
                content = chunk.choices[0].delta.content
                if content is not None:
                    yield content

            duration = (time.time() - start_time) * 1000

            # Completion record; response body is not captured for streams.
            log_api_call(
                api_name="QWEN_CHAT_COMPLETION",
                api_type="LLM",
                request_data=request_data,
                response_data={"response_length": "streaming", "response_preview": "streaming"},
                duration_ms=duration,
            )

            self.logger.info(f"流式LLM调用完成 (duration: {duration:.2f}ms)")
            log_function_result("chat_completion_streaming", "streaming_complete", duration)

        except Exception as e:
            duration = (time.time() - start_time) * 1000
            self.logger.error(f"流式LLM调用失败: {e}")
            # Pass the exception object plus a context dict, matching the
            # log_function_error(name, error, context) convention used
            # elsewhere in this file (the original passed str(e) and a bare
            # duration in the context slot).
            log_function_error("chat_completion_streaming", e, request_data)

            log_api_call(
                api_name="QWEN_CHAT_COMPLETION",
                api_type="LLM",
                request_data=request_data,
                duration_ms=duration,
                error=str(e),
            )

            yield f"抱歉，流式响应出现错误：{str(e)}"


# Module-level singleton shared by importers; construction is cheap because
# the OpenAI client itself is created lazily on first use.
llm_service = LLMService()
