import asyncio
import json
from typing import List, Dict, Any, Optional, AsyncGenerator, Union
from litellm import acompletion
from config import settings
import logging

logger = logging.getLogger(__name__)

class LLMService:
    """Async wrapper around litellm's completion API.

    Reads model and credential configuration from the project ``settings``
    module and layers task-specific helpers (intent recognition, streaming
    chat, chart analysis, form-data extraction) on top of the raw calls.
    """

    def __init__(self):
        # All connection parameters come from the central settings module.
        self.model = settings.LLM_MODEL
        self.api_base = settings.LLM_API_BASE
        self.api_key = settings.DASHSCOPE_API_KEY
        self.temperature = settings.LLM_TEMPERATURE
        self.max_tokens = settings.LLM_MAX_TOKENS

    @staticmethod
    def _extract_content(response: Any) -> str:
        """Pull the text content out of an LLM response of any known shape.

        Supports OpenAI-style response objects, plain ``choices`` dicts and
        raw strings; anything else is stringified as a last resort so the
        caller always gets a ``str`` back.
        """
        if hasattr(response, 'choices') and response.choices:
            # OpenAI-style response object.
            return response.choices[0].message.content
        if isinstance(response, dict) and 'choices' in response:
            # Plain dict response.
            return response['choices'][0]['message']['content']
        if isinstance(response, str):
            # Already a bare string.
            return response
        logger.error(f"未知的响应格式: {type(response)}, 内容: {response}")
        return str(response)  # last-resort fallback

    @staticmethod
    def _strip_code_fences(content: str) -> str:
        """Remove a surrounding markdown code fence (``` or ```json) if present.

        LLMs frequently wrap JSON answers in markdown fences even when asked
        for bare JSON; stripping them before ``json.loads`` avoids spurious
        parse failures. Content without fences is returned stripped of
        surrounding whitespace only.
        """
        text = content.strip()
        if text.startswith("```"):
            text = text[3:]
            if text.lower().startswith("json"):
                text = text[4:]
            if text.endswith("```"):
                text = text[:-3]
        return text.strip()

    async def chat_completion(
        self,
        messages: List[Dict[str, str]],
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        stream: bool = False,
        **kwargs
    ) -> Any:
        """Basic chat-completion call.

        Args:
            messages: Chat history in OpenAI message format.
            temperature: Optional override; defaults to the configured value.
            max_tokens: Optional override; defaults to the configured value.
            stream: When True, litellm returns an async stream object.
            **kwargs: Extra parameters forwarded to ``acompletion``.

        Returns:
            The litellm response object (or stream when ``stream=True``).

        Raises:
            Exception: Re-raises whatever ``acompletion`` raised, after logging.
        """
        try:
            response = await acompletion(
                model=self.model,
                api_base=self.api_base,
                api_key=self.api_key,
                messages=messages,
                # "is not None" so an explicit 0 / 0.0 override is honored
                # (a bare "or" would silently discard falsy overrides).
                temperature=temperature if temperature is not None else self.temperature,
                max_tokens=max_tokens if max_tokens is not None else self.max_tokens,
                stream=stream,
                **kwargs
            )
            return response
        except Exception as e:
            logger.error(f"LLM调用失败: {e}")
            raise

    async def stream_chat_completion(
        self,
        messages: List[Dict[str, str]],
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        **kwargs
    ) -> AsyncGenerator[str, None]:
        """Streaming chat-completion call.

        Yields:
            Incremental text deltas as they arrive from the model; chunks
            with empty content are skipped.

        Raises:
            Exception: Re-raises whatever the underlying call raised,
            after logging.
        """
        try:
            response = await acompletion(
                model=self.model,
                api_base=self.api_base,
                api_key=self.api_key,
                messages=messages,
                # Honor explicit 0 / 0.0 overrides (see chat_completion).
                temperature=temperature if temperature is not None else self.temperature,
                max_tokens=max_tokens if max_tokens is not None else self.max_tokens,
                stream=True,
                **kwargs
            )

            async for chunk in response:
                if chunk.choices[0].delta.content:
                    yield chunk.choices[0].delta.content

        except Exception as e:
            logger.error(f"流式LLM调用失败: {e}")
            raise

    async def intent_recognition(
        self,
        user_query: str,
        available_agents: List[Dict[str, Any]],
        context: Optional[Any] = None
    ) -> Dict[str, Any]:
        """Intent recognition: pick the most suitable agent for a query.

        Args:
            user_query: Raw user query text.
            available_agents: Agent descriptors with ``name``, ``description``
                and ``keywords`` keys.
            context: Optional SessionContext-like object. NOTE(review):
                attribute access (``context.session_id`` etc.) is used, so a
                plain dict will not work here despite the original hint.

        Returns:
            Parsed JSON dict with ``selected_agent``, ``confidence``,
            ``reasoning`` and ``extracted_entities``; falls back to
            ``general_chat`` when the LLM output cannot be parsed or the
            call fails.
        """

        agent_descriptions = "\n".join(
            f"- {agent['name']}: {agent['description']} (关键词: {', '.join(agent['keywords'])})"
            for agent in available_agents
        )

        context_str = ""
        if context:
            # Flatten the SessionContext into a JSON-serializable dict.
            context_dict = {
                "session_id": context.session_id,
                "user_info": context.user_info.dict() if context.user_info else None,
                "tenant_info": context.tenant_info.dict() if context.tenant_info else None,
                "created_at": context.created_at.isoformat() if context.created_at else None,
                "last_activity": context.last_activity.isoformat() if context.last_activity else None,
                "workflow_state": context.workflow_state,
                "skill_selected": context.skill_selected,
                "enable_network": context.enable_network
            }
            context_str = f"\n当前上下文: {json.dumps(context_dict, ensure_ascii=False, indent=2)}"

        prompt = f"""你是一个智能助手的意图识别模块。请分析用户查询并选择最合适的Agent来处理。

可用的Agent列表:
{agent_descriptions}

用户查询: "{user_query}"{context_str}

请返回JSON格式的结果，包含以下字段:
{{
    "selected_agent": "选中的Agent名称",
    "confidence": 0.95,
    "reasoning": "选择理由",
    "extracted_entities": {{
        "entity_type": "entity_value"
    }}
}}

如果无法确定合适的Agent，请选择"general_chat"。"""

        messages = [{"role": "user", "content": prompt}]

        try:
            # Low temperature keeps the classification deterministic.
            response = await self.chat_completion(messages, temperature=0.1)
            content = self._extract_content(response)

            try:
                # Strip markdown fences first: models often wrap JSON in ```.
                return json.loads(self._strip_code_fences(content))
            except json.JSONDecodeError:
                # Unparseable output: degrade gracefully to general chat.
                return {
                    "selected_agent": "general_chat",
                    "confidence": 0.5,
                    "reasoning": "无法解析LLM响应",
                    "extracted_entities": {}
                }

        except Exception as e:
            logger.error(f"意图识别失败: {e}")
            return {
                "selected_agent": "general_chat",
                "confidence": 0.0,
                "reasoning": f"意图识别出错: {str(e)}",
                "extracted_entities": {}
            }

    async def generate_response(
        self,
        query: str,
        context: Dict[str, Any],
        system_prompt: str = "",
        stream: bool = False
    ) -> Union[AsyncGenerator[str, None], str]:
        """Generate a reply to *query*, optionally streaming.

        Args:
            query: User query; data-source context is appended to it.
            context: Dict possibly carrying a ``data_sources`` mapping.
            system_prompt: Optional system message prepended to the chat.
            stream: When True, returns the async generator of text chunks
                instead of the final string.

        Returns:
            An async generator of text chunks when ``stream=True``,
            otherwise the complete response text.
        """

        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})

        # Inline any gathered data-source payloads into the user query so the
        # model can ground its answer on them.
        if isinstance(context, dict) and context.get("data_sources"):
            context_str = "\n\n相关数据:\n"
            data_sources = context["data_sources"]
            if isinstance(data_sources, dict):
                for source, data in data_sources.items():
                    context_str += f"{source}: {json.dumps(data, ensure_ascii=False, indent=2)}\n"
                query = query + context_str

        messages.append({"role": "user", "content": query})

        if stream:
            # Hand back the generator itself; the caller iterates it.
            return self.stream_chat_completion(messages)
        response = await self.chat_completion(messages)
        # Tolerate object/dict/str response shapes, same as intent_recognition.
        return self._extract_content(response)

    async def generate_chart_analysis(
        self,
        chart_data: Dict[str, Any],
        chart_type: str
    ) -> AsyncGenerator[str, None]:
        """Generate a streaming textual analysis of chart data.

        Args:
            chart_data: JSON-serializable chart payload embedded in the prompt.
            chart_type: Human-readable chart type name used in the prompt.

        Yields:
            Analysis text chunks as they stream from the model.
        """

        prompt = f"""请分析以下{chart_type}图表数据，提供简洁的分析说明：

数据: {json.dumps(chart_data, ensure_ascii=False, indent=2)}

请从以下角度分析：
1. 数据趋势
2. 关键发现
3. 可能的原因
4. 建议行动

请用简洁明了的语言，分段输出分析结果。"""

        messages = [{"role": "user", "content": prompt}]

        # Slightly higher temperature than extraction tasks for fluent prose.
        async for chunk in self.stream_chat_completion(messages, temperature=0.3):
            yield chunk

    async def extract_form_data(
        self,
        user_input: str,
        form_template: Dict[str, Any],
        existing_data: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Extract structured form data from free-form user input.

        Args:
            user_input: Raw user text to mine for field values.
            form_template: Form schema describing the expected fields.
            existing_data: Previously collected values, shown to the LLM so
                it only fills the gaps.

        Returns:
            Dict with ``extracted_data``, ``missing_fields``, ``confidence``
            and ``suggestions``; every failure path returns a safe empty
            result instead of raising.
        """

        logger.info(f"开始提取表单数据，用户输入: {user_input}")
        logger.info(f"表单模板: {form_template}")

        existing_str = ""
        if existing_data:
            existing_str = f"\n\n已有数据: {json.dumps(existing_data, ensure_ascii=False, indent=2)}"

        prompt = f"""请从用户输入中提取表单数据。

表单模板: {json.dumps(form_template, ensure_ascii=False, indent=2)}

用户输入: "{user_input}"{existing_str}

请返回JSON格式的提取结果，包含以下字段:
{{
    "extracted_data": {{
        "field_name": "extracted_value"
    }},
    "missing_fields": ["field1", "field2"],
    "confidence": 0.95,
    "suggestions": "对缺失字段的建议"
}}

注意：
1. 只提取能够确定的字段值
2. 对于日期时间，请转换为标准格式
3. 对于选择字段，请匹配预定义选项"""

        messages = [{"role": "user", "content": prompt}]

        try:
            logger.info("调用LLM进行数据提取")
            # Low temperature keeps the extraction deterministic.
            response = await self.chat_completion(messages, temperature=0.1)
            # Tolerate object/dict/str response shapes (consistent with
            # intent_recognition, which the original inlined by hand).
            content = self._extract_content(response)

            logger.info(f"LLM响应类型: {type(content)}, 内容: {content}")

            # Guard: message content can legitimately be None.
            if not isinstance(content, str):
                logger.warning(f"LLM返回的内容不是字符串类型: {type(content)}")
                return {
                    "extracted_data": {},
                    "missing_fields": [],
                    "confidence": 0.0,
                    "suggestions": "LLM返回格式错误"
                }

            try:
                logger.info("开始解析JSON")
                # Strip markdown fences first: models often wrap JSON in ```.
                result = json.loads(self._strip_code_fences(content))
                logger.info(f"JSON解析成功，结果类型: {type(result)}, 内容: {result}")

                # The model might return a bare list/scalar; only a dict is usable.
                if not isinstance(result, dict):
                    logger.warning(f"JSON解析结果不是字典类型: {type(result)}")
                    return {
                        "extracted_data": {},
                        "missing_fields": [],
                        "confidence": 0.0,
                        "suggestions": "解析结果格式错误"
                    }

                logger.info("开始验证和处理字段")
                # Coerce each expected field to a safe default if mistyped.
                extracted_data = result.get("extracted_data")
                logger.info(f"extracted_data类型: {type(extracted_data)}, 值: {extracted_data}")
                if not isinstance(extracted_data, dict):
                    logger.warning(f"extracted_data不是字典类型: {type(extracted_data)}, 设置为空字典")
                    result["extracted_data"] = {}

                missing_fields = result.get("missing_fields")
                logger.info(f"missing_fields类型: {type(missing_fields)}, 值: {missing_fields}")
                if not isinstance(missing_fields, list):
                    logger.warning(f"missing_fields不是列表类型: {type(missing_fields)}, 设置为空列表")
                    result["missing_fields"] = []

                confidence = result.get("confidence")
                logger.info(f"confidence类型: {type(confidence)}, 值: {confidence}")
                if not isinstance(confidence, (int, float)):
                    logger.warning(f"confidence不是数字类型: {type(confidence)}, 设置为0.0")
                    result["confidence"] = 0.0

                suggestions = result.get("suggestions")
                logger.info(f"suggestions类型: {type(suggestions)}, 值: {suggestions}")
                if not isinstance(suggestions, str):
                    logger.warning(f"suggestions不是字符串类型: {type(suggestions)}, 设置为默认值")
                    result["suggestions"] = ""

                logger.info(f"最终返回结果: {result}")
                return result

            except json.JSONDecodeError as e:
                logger.warning(f"JSON解析失败: {e}, 原始内容: {content}")
                return {
                    "extracted_data": {},
                    "missing_fields": [],
                    "confidence": 0.0,
                    "suggestions": "无法解析提取结果"
                }

        except Exception as e:
            import traceback
            error_traceback = traceback.format_exc()
            logger.error(f"表单数据提取失败: {e}\n堆栈跟踪:\n{error_traceback}")
            return {
                "extracted_data": {},
                "missing_fields": [],
                "confidence": 0.0,
                "suggestions": f"提取失败: {str(e)}"
            }

# Global singleton LLM service instance shared across the application.
llm_service = LLMService()