"""
LLM服务
提供智谱大语言模型API调用服务
"""

import asyncio
import logging
import time
import traceback
from typing import Any, AsyncGenerator, Dict, List, Optional

from zhipuai import ZhipuAI

from config.settings import settings

logger = logging.getLogger(__name__)


class LLMService:
    """Async wrapper around the ZhipuAI chat-completions SDK.

    The underlying ``zhipuai`` client is synchronous; the methods here are
    declared ``async`` for caller convenience but currently issue blocking
    calls (see the NOTE in ``_call_zhipu``). Use as an async context manager
    so the client is created on entry and released on exit.
    """

    def __init__(self, provider: str = "zhipu"):
        """
        Initialize the Zhipu LLM service.

        Args:
            provider: LLM provider identifier; fixed to "zhipu".
        """
        self.provider = provider
        # Created in __aenter__; None whenever used outside the context manager.
        self.client = None

        # Zhipu API configuration, sourced from application settings.
        self.config = {
            "api_key": settings.ZHIPU_API_KEY,
            "base_url": settings.ZHIPU_CHAT_URL,
            "model": settings.ZHIPU_MODEL,
            # NOTE(review): 60000 reads like milliseconds, but this value is
            # never passed to the SDK — confirm units before wiring it up.
            "timeout": 60000,
            "max_retries": 3,
        }

    async def __aenter__(self):
        """Async context manager entry: construct the SDK client."""
        self.client = ZhipuAI(api_key=self.config["api_key"])
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit: drop the client reference."""
        self.client = None

    async def chat_completion(
        self,
        messages: List[Dict[str, str]],
        stream: bool = True,
    ) -> Dict[str, Any]:
        """
        Run a chat completion.

        Args:
            messages: Conversation messages as {"role", "content"} dicts.
            stream: Whether to request a streaming response.

        Returns:
            Dict with "content", "usage" and "model" keys.

        Raises:
            Exception: Propagated after logging when the API call fails.
        """
        try:
            return await self._call_zhipu(messages, stream)
        except Exception as e:
            logger.error(f"智谱LLM调用失败: {str(e)}")
            raise

    async def knowledge_base_chat(
        self,
        query: str,
        history: Optional[List[Dict[str, str]]] = None,
        stream: bool = True,
    ) -> Dict[str, Any]:
        """
        Knowledge-base chat (Zhipu has no KB support yet; plain chat is used).

        Args:
            query: Current user query.
            history: Prior turns as {"role", "content"} dicts; entries whose
                role is not "user"/"assistant" are silently dropped.
            stream: Whether to request a streaming response.

        Returns:
            Same shape as :meth:`chat_completion`.

        Raises:
            Exception: Propagated after logging when the API call fails.
        """
        try:
            # Keep only user/assistant turns from the history.
            messages = [
                {"role": msg["role"], "content": msg["content"]}
                for msg in (history or [])
                if msg.get("role") in ("user", "assistant")
            ]

            # Append the current query as the final user turn.
            messages.append({"role": "user", "content": query})

            return await self._call_zhipu(messages=messages, stream=stream)
        except Exception as e:
            logger.error(f"知识库聊天调用失败: {str(e)}")
            raise

    async def _call_zhipu(
        self,
        messages: List[Dict[str, str]],
        stream: bool
    ) -> Dict[str, Any]:
        """Call the Zhipu API with retries.

        Retries up to ``max_retries`` times with a fixed delay between
        attempts, then raises the last failure (chained).

        NOTE(review): the SDK call (and stream iteration) is synchronous and
        blocks the event loop; consider ``asyncio.to_thread`` if this runs on
        a busy loop — left unchanged here to preserve execution semantics.
        """
        try:
            logger.info(f"发送智谱请求，模型: {self.config['model']}")
            logger.info(f"消息数量: {len(messages)}")
            logger.info(f"流式响应: {stream}")

            # Minimal request parameters, mirroring the known-good test setup.
            request_params = {
                "model": self.config["model"],
                "messages": messages,
                "stream": stream,
            }

            # Debug logging of the outgoing request.
            logger.info(f"请求参数: {request_params}")
            logger.info(f"消息格式: {messages}")

            max_retries = self.config.get("max_retries", 3)
            retry_delay = 2  # seconds between attempts

            for attempt in range(max_retries):
                try:
                    logger.info(f"发送智谱请求 (尝试 {attempt + 1}/{max_retries})...")

                    # Synchronous SDK call (matches the known-good test class).
                    response = self.client.chat.completions.create(**request_params)

                    if stream:
                        # Drain the stream into a single concatenated string.
                        content = await self._process_zhipu_stream_response(response)
                    else:
                        content = response.choices[0].message.content

                    return {
                        "content": content,
                        # Streaming responses may not expose usage; default {}.
                        "usage": getattr(response, 'usage', {}),
                        "model": self.config["model"]
                    }

                except Exception as e:
                    logger.error(f"智谱请求出错 (尝试 {attempt + 1}/{max_retries}): {str(e)}")
                    logger.error(f"错误类型: {type(e).__name__}")
                    logger.error(f"错误详情: {traceback.format_exc()}")

                    if attempt < max_retries - 1:
                        await asyncio.sleep(retry_delay)
                        continue
                    # Chain the original exception so the root cause survives.
                    raise Exception(f"发送智谱请求时出错: {str(e)}") from e

            # Defensive: unreachable because the final attempt re-raises above.
            raise Exception("达到最大重试次数，智谱请求失败")

        except Exception as e:
            logger.error(f"智谱API调用失败: {str(e)}")
            raise

    async def _process_zhipu_stream_response(self, response) -> str:
        """Consume a Zhipu streaming response and concatenate its text.

        Args:
            response: Iterable of SDK stream chunks.

        Returns:
            The full concatenated delta text.
        """
        logger.info("处理智谱流式响应...")
        full_text = ""
        chunk_count = 0
        start_time = time.time()

        try:
            logger.info("开始读取响应内容...")
            # The SDK stream is a synchronous iterator.
            for chunk in response:
                chunk_count += 1

                try:
                    # Progress trace (was a bare print(); library code should
                    # not write progress to stdout).
                    logger.debug(f"已接收 {chunk_count} 个数据块 | 文本长度: {len(full_text)}")

                    # Append incremental delta content when present.
                    if hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content'):
                        chunk_text = chunk.choices[0].delta.content
                        if chunk_text:
                            full_text += chunk_text
                            logger.info(f"数据块 {chunk_count}: 内容: {chunk_text[:100]}...")

                    # Stop once the SDK signals completion.
                    if hasattr(chunk.choices[0], 'finish_reason') and chunk.choices[0].finish_reason:
                        logger.info(f"数据块 {chunk_count}: 收到结束标记，原因: {chunk.choices[0].finish_reason}")
                        break

                except Exception as e:
                    # A single malformed chunk should not abort the stream.
                    logger.error(f"数据块 {chunk_count}: 处理出错: {str(e)}")
                    logger.error(f"数据块 {chunk_count}: 错误类型: {type(e).__name__}")
                    continue

        except Exception as e:
            logger.error(f"处理智谱流式响应时出错: {str(e)}")
            logger.error(f"错误类型: {type(e).__name__}")
            logger.error(f"错误详情: {traceback.format_exc()}")
            raise

        total_time = time.time() - start_time
        logger.info(f"流式响应处理完成，总数据块: {chunk_count}, 文本长度: {len(full_text)}, 总耗时: {total_time:.2f}s")
        return full_text

    async def stream_chat_completion(
        self,
        messages: List[Dict[str, str]],
        web_search_enabled: bool = False
    ) -> "AsyncGenerator[dict, None]":
        """
        Zhipu streaming generator; yields each content fragment as received.

        Text packet:    {"type": "text", "data": "..."}
        Trailer packet: {"type": "web_search", "data": [...]} — emitted only
        when web-search results were collected.
        """
        request_params = {
            "model": self.config["model"],
            "messages": messages,
            "stream": True,
        }
        web_search_results = []
        # Enable the built-in web-search tool when requested.
        if web_search_enabled:
            request_params["tools"] = [
                {
                    "type": "web_search",
                    "web_search": {
                        "searchEngine": "search_std",
                        "enable": True,
                        "search_result": True
                    }
                }
            ]
            logger.info("启用联网搜索功能")
        else:
            logger.info("使用标准聊天模式，未启用联网搜索")
        # NOTE(review): synchronous SDK iteration inside an async generator —
        # blocks the event loop between chunks; confirm this is acceptable.
        response = self.client.chat.completions.create(**request_params)
        for chunk in response:
            # Incremental text delta.
            if hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content'):
                chunk_text = chunk.choices[0].delta.content
                if chunk_text:
                    yield {"type": "text", "data": chunk_text}
            # Web-search results may arrive as a direct chunk attribute or
            # inside extra_json, depending on SDK version.
            web_search_list = []
            if hasattr(chunk, 'web_search') and chunk.web_search:
                web_search_list = chunk.web_search
            elif hasattr(chunk, 'extra_json') and chunk.extra_json and 'web_search' in chunk.extra_json:
                web_search_list = chunk.extra_json['web_search']
            # Normalize each hit into a plain dict (assumes dict-like items
            # exposing .get — TODO confirm against the SDK chunk type).
            for item in web_search_list:
                web_search_results.append({
                    "icon": item.get("icon", ""),
                    "title": item.get("title", ""),
                    "link": item.get("link", ""),
                    "media": item.get("media", ""),
                    "publish_date": item.get("publish_date", ""),
                    "content": item.get("content", "")
                })
        # Trailer: emit collected search results once the stream is exhausted.
        if web_search_results:
            yield {"type": "web_search", "data": web_search_results}