import json
import logging
import os
import time
import asyncio
from typing import Dict, List, Any, Optional, Annotated, TypedDict
from enum import Enum

import pymysql
import dashscope
import numpy as np
from dotenv import load_dotenv
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langchain_core.messages import HumanMessage, AIMessage, BaseMessage

from ..llm import TongyiLLMConfig, TongyiLLM
from ..tools import SofaRetrievalTool
from ..prompt.prompt_list import (
    extract_info_prompt, 
    recommendation_prompt, 
    normal_chat_prompt, 
    intent_classification_prompt,
    product_detail_response_prompt,
    user_guidance_prompt
)

logger = logging.getLogger(__name__)

class IntentType(Enum):
    """Closed set of user-intent labels produced by the intent classifier."""

    NORMAL_CHAT = "normal_chat"  # everyday small talk
    PRODUCT_RECOMMENDATION = "product_recommendation"  # asking for product suggestions
    PRODUCT_DETAIL_INQUIRY = "product_detail_inquiry"  # asking about one specific product
    OTHER = "other"  # anything else

class ConversationState(TypedDict):
    """State dictionary carried through the LangGraph workflow.

    Each node receives this state, mutates/extends it, and returns it.
    All fields except ``messages`` are reset to ``None`` between sessions.
    """

    # Message history (append-merged by LangGraph's add_messages reducer)
    messages: Annotated[List[BaseMessage], add_messages]

    # Intent classified for the current turn (an IntentType value string)
    intent: Optional[str]

    # Structured product conditions extracted from the user's message
    extracted_conditions: Optional[Dict[str, Any]]

    # Most recent product search hits
    product_search_results: Optional[List[Dict]]

    # Most recent product-detail retrieval result
    product_detail_results: Optional[Dict[str, Any]]
    # Raw text of the latest user message
    last_user_message: Optional[str]

    # Path of an uploaded image, if any
    uploaded_image_path: Optional[str]

    # Product id inferred by intent classification, if any
    inferred_product_id: Optional[int]

class SofaConversationAgent:
    """基于 LangGraph 的沙发咨询对话 Agent"""
    
    def __init__(self, table_name: str = "sofa_demo_v2", topk: int = 5):
        """Wire up the LLM client, the retrieval tool and the workflow graph.

        Args:
            table_name: Database table holding the sofa products.
            topk: Maximum number of retrieval results per search.
        """
        # Initialize the LLM client
        self.llm_config = TongyiLLMConfig(llm_name='qwen-plus')
        self.llm = TongyiLLM(config=self.llm_config)
        
        # Initialize the retrieval tool
        self.retrieval_tool = SofaRetrievalTool(table_name=table_name, topk=topk)
        
        # Build and compile the workflow graph
        self.workflow = self._build_workflow()
        self.app = self.workflow.compile()
        
        # Initialize the internal state (the graph manages it itself)
        self._reset_state()
    
    def _reset_state(self):
        """Re-create the internal conversation state with empty defaults."""
        empty_fields = dict.fromkeys((
            "intent",
            "extracted_conditions",
            "product_search_results",
            "product_detail_results",
            "last_user_message",
            "uploaded_image_path",
            "inferred_product_id",
        ))
        self._internal_state = ConversationState(messages=[], **empty_fields)
    
    def clear_history(self):
        """Clear the conversation history and re-initialize the state."""
        self._reset_state()
        logger.info("对话历史已清理，状态已重置")
    
    def print_state(self, show_full_messages=True):
        """Pretty-print the current conversation state for debugging.

        Args:
            show_full_messages: When True, show (length-capped) full message
                bodies; when False, show only a short summary per message.
        """
        state = self._internal_state
        
        print("\n" + "="*80)
        print("📊 对话状态详细信息")
        print("="*80)
        
        # 1. Basic scalar state fields
        print(f"🎯 当前意图: {state.get('intent', 'None')}")
        print(f"💬 最后用户消息: {state.get('last_user_message', 'None')}")
        print(f"🖼️  上传图片路径: {state.get('uploaded_image_path', 'None')}")
        print(f"🔍 推理的产品ID: {state.get('inferred_product_id', 'None')}")
        
        # 2. Message history
        messages = state.get("messages", [])
        print(f"\n📜 消息历史 (共 {len(messages)} 条):")
        if not messages:
            print("  📭 暂无消息")
        else:
            for i, msg in enumerate(messages, 1):
                if isinstance(msg, HumanMessage):
                    role = "👤 用户"
                    content = msg.content
                elif isinstance(msg, AIMessage):
                    role = "🤖 助手"
                    content = msg.content
                else:
                    role = "❓ 未知"
                    content = str(msg)
                
                if show_full_messages:
                    # Show the full message, capped to avoid flooding the console
                    if len(content) > 200:
                        display_content = content[:200] + "..."
                    else:
                        display_content = content
                    print(f"  {i}. {role}: {display_content}")
                else:
                    # Show only a short summary of the message
                    summary = content[:50] + "..." if len(content) > 50 else content
                    print(f"  {i}. {role}: {summary}")
        
        # 3. Extracted filter conditions
        # (the redundant function-local `import json` was removed; the
        # module-level import is used instead)
        conditions = state.get("extracted_conditions")
        print(f"\n🔧 提取的条件:")
        if not conditions:
            print("  📭 暂无提取条件")
        else:
            print(f"  {json.dumps(conditions, ensure_ascii=False, indent=4)}")
        
        # 4. Product search results
        product_search_results = state.get("product_search_results")
        print(f"\n🔍 搜索结果:")
        if not product_search_results:
            print("  📭 暂无搜索结果")
        else:
            print(f"  📦 找到 {len(product_search_results)} 个产品:")
            for i, product in enumerate(product_search_results[:5], 1):  # show the first 5 only
                similarity = product.get('similarity', 0)
                print(f"    {i}. ID:{product.get('id')} - {product.get('name')} (相似度:{similarity:.4f})")
            if len(product_search_results) > 5:
                print(f"    ... 还有 {len(product_search_results) - 5} 个结果")
        
        # 5. Product detail results (section was mis-numbered "6" before)
        detail_results = state.get("product_detail_results")
        print(f"\n📋 产品详情结果:")
        if not detail_results:
            print("  📭 暂无详情结果")
        elif "error" in detail_results:
            print(f"  ❌ 错误: {detail_results['error']}")
        else:
            basic_info = detail_results.get("product_basic_info", {})
            relevant_chunks = detail_results.get("relevant_chunks", [])
            print(f"  📦 产品: {basic_info.get('name', '未知')}")
            print(f"  💰 价格: ¥{basic_info.get('price', '未知')}")
            print(f"  📄 相关文档块: {len(relevant_chunks)} 个")
            if relevant_chunks:
                for i, chunk in enumerate(relevant_chunks[:3], 1):  # show the first 3 only
                    similarity = chunk.get('similarity', 0)
                    print(f"    {i}. {chunk.get('chunk_title', '未知标题')} (相似度:{similarity:.4f})")
        
        print("\n" + "="*80)
        print("✅ 状态信息打印完成")
        print("="*80 + "\n")
    
    def _clean_json_response(self, response_text: str) -> str:
        """Strip a surrounding Markdown code fence from an LLM JSON reply.

        BUGFIX: the previous implementation used ``str.replace``, which
        removed EVERY occurrence of "```" anywhere in the text and could
        therefore corrupt backticks legitimately appearing inside JSON
        string values. Only the leading and trailing fence markers are
        stripped now.

        Args:
            response_text: Raw LLM output, possibly wrapped in ```json fences.

        Returns:
            The response with fence markers removed, or the original text
            unchanged when it is not fenced.
        """
        text = response_text.strip()
        if not text.startswith("```"):
            return response_text
        # Drop the opening fence, with or without the "json" language tag
        if text.startswith("```json"):
            text = text[len("```json"):]
        else:
            text = text[3:]
        # Drop the closing fence if present
        if text.endswith("```"):
            text = text[:-3]
        return text.strip()
    
    def _identify_intent(self, state: ConversationState) -> tuple:
        """Classify the latest user message with the LLM.

        Returns:
            A ``(intent, product_id)`` tuple where ``intent`` is one of the
            ``IntentType`` values and ``product_id`` is the product the user
            is asking about (``None`` when no product could be inferred).
        """
        if not state["messages"]:
            return IntentType.OTHER.value, None
        
        last_message = state["messages"][-1]
        if not isinstance(last_message, HumanMessage):
            return IntentType.OTHER.value, None
        
        user_input = last_message.content
        if not user_input:
            return IntentType.OTHER.value, None
        
        # Build the conversation context from up to the last 5 PREVIOUS turns.
        # BUGFIX: the old code used messages[-5:] when more than 5 messages
        # existed, which duplicated the CURRENT user message into the
        # "history"; slicing messages[:-1] first keeps both branches
        # consistent and always excludes the current turn.
        context_messages = state["messages"][:-1][-5:]
        
        if context_messages:
            context_lines = []
            for msg in context_messages:
                msg_content = msg.content if msg.content else ""
                if isinstance(msg, HumanMessage):
                    context_lines.append(f"用户: {msg_content}")
                elif isinstance(msg, AIMessage):
                    # Truncate assistant turns, keeping enough for product info
                    context_lines.append(f"助手: {msg_content[:200]}...")
            conversation_context = "\n".join(context_lines)
        else:
            conversation_context = "无历史对话"
        
        # Append the most recently recommended products so the classifier can
        # resolve references such as "the second one"
        product_search_results = state.get("product_search_results", [])
        if product_search_results:
            product_info_lines = ["\n=== 最近推荐的产品信息 ==="]
            for i, product in enumerate(product_search_results, 1):
                product_info_lines.append(f"产品{i}: ID={product.get('id')}, 名称={product.get('name')}")
            conversation_context += "\n".join(product_info_lines)
        
        # Build the intent-classification prompt
        prompt = intent_classification_prompt.format(
            user_input=user_input,
            conversation_context=conversation_context
        )
        
        try:
            response = self.llm.chat(prompt)
            
            if response.status_code != 200:
                logger.error(f"LLM 意图识别失败: {response}")
                return IntentType.OTHER.value, None
            
            # Strip a possible Markdown code fence, then parse the JSON verdict
            response_text = self._clean_json_response(response.output.text)
            result = json.loads(response_text)
            intent = result.get("intent", IntentType.OTHER.value)
            confidence = result.get("confidence", 0.0)
            reason = result.get("reason", "")
            product_id = result.get("product_id", None)
            
            logger.info(f"LLM 意图识别结果: {intent} (置信度: {confidence:.2f}, 理由: {reason}, 产品ID: {product_id})")
            
            # Validate the intent label against the known set
            valid_intents = [
                IntentType.NORMAL_CHAT.value, 
                IntentType.PRODUCT_RECOMMENDATION.value, 
                IntentType.PRODUCT_DETAIL_INQUIRY.value,
                IntentType.OTHER.value
            ]
            if intent not in valid_intents:
                logger.warning(f"无效的意图类型: {intent}, 默认为 other")
                return IntentType.OTHER.value, None
            
            # Double-check: a detail inquiry without a resolved product id
            # degrades to a recommendation request
            if intent == IntentType.PRODUCT_DETAIL_INQUIRY.value and not product_id:
                logger.warning(f"产品详细信息咨询但无法推理产品ID，改为产品推荐意图")
                return IntentType.PRODUCT_RECOMMENDATION.value, None
            
            return intent, product_id
                
        except json.JSONDecodeError as e:
            logger.error(f"意图分类 JSON 解析失败: {e}")
            return IntentType.OTHER.value, None
        except Exception as e:
            logger.error(f"意图识别异常: {e}")
            return IntentType.OTHER.value, None
    
    def _analyze_intent(self, state: ConversationState) -> ConversationState:
        """Intent-analysis node: classify the latest user turn and record it."""
        intent, product_id = self._identify_intent(state)
        state["intent"] = intent
        state["inferred_product_id"] = product_id
        
        # Remember the most recent human message for downstream nodes
        messages = state["messages"]
        if messages and isinstance(messages[-1], HumanMessage):
            state["last_user_message"] = messages[-1].content
        
        logger.info(f"识别到的意图: {intent}, 推理的产品ID: {product_id}")
        return state
    
    def _normal_chat(self, state: ConversationState) -> ConversationState:
        """Small-talk node: answer an everyday chat message via the LLM."""
        prompt = normal_chat_prompt.format(
            user_content=state["last_user_message"] or ""
        )
        response = self.llm.chat(prompt)
        
        # Append either the LLM reply or a fixed apology on failure
        if response.status_code == 200:
            reply = AIMessage(content=response.output.text)
        else:
            reply = AIMessage(content="抱歉，我暂时无法处理您的请求，请稍后再试。")
        state["messages"].append(reply)
        
        return state
    
    def _extract_conditions(self, state: ConversationState) -> ConversationState:
        """Condition-extraction node: pull structured sofa filters out of the
        user's message with the LLM and store them on the state."""
        user_message = state["last_user_message"] or ""
        conditions = {}
        
        try:
            # Debug: log the raw text we are extracting from
            logger.info(f"🔍 [调试] 提取条件 - 输入文本: {user_message}")
            
            response = self.llm.chat(extract_info_prompt.format(user_info=user_message))
            
            if response.status_code == 200:
                # The LLM may wrap its JSON answer in a Markdown code fence
                payload = self._clean_json_response(response.output.text)
                parsed = json.loads(payload)
                # Drop null-valued fields
                conditions = {key: value for key, value in parsed.items() if value is not None}
                
                # Debug: log the filtered, structured output
                logger.info(f"📊 [调试] 提取条件 - 结构化输出: {json.dumps(conditions, ensure_ascii=False, indent=2)}")
            else:
                logger.error(f"条件提取失败: {response}")
        except Exception as e:
            logger.error(f"条件提取异常: {e}")
            conditions = {}
        
        state["extracted_conditions"] = conditions
        logger.info(f"提取的条件: {conditions}")
        return state
    
    def _retrieve_product_details(self, state: ConversationState) -> ConversationState:
        """Detail-retrieval node: fetch document chunks for one product."""
        user_message = state["last_user_message"] or ""
        product_id = state.get("inferred_product_id")
        
        # Without a resolved product id there is nothing to look up
        if not product_id:
            state["product_detail_results"] = {
                "error": "无法确定要查询的具体产品，请明确指定产品ID或产品名称"
            }
            logger.warning("没有推理出产品ID，无法检索产品详细信息")
            return state
        
        try:
            logger.info(f"🔍 [调试] 产品详细信息检索 - 产品ID: {product_id}")
            logger.info(f"🔍 [调试] 产品详细信息检索 - 查询文本: {user_message}")
            
            # Delegate the actual lookup to the retrieval tool
            detail_results = self.retrieval_tool.search_product_details(
                product_id=product_id,
                query=user_message,
                topk=3
            )
            
            # Debug: log the retrieved chunks
            if "error" not in detail_results:
                relevant_chunks = detail_results.get("relevant_chunks", [])
                logger.info(f"📊 [调试] 产品详细信息检索 - 找到 {len(relevant_chunks)} 个相关分块")
                for idx, chunk in enumerate(relevant_chunks, 1):
                    logger.info(f"📊 [调试] 分块{idx}: {chunk['chunk_title']} (相似度: {chunk['similarity']:.4f})")
        except Exception as e:
            logger.error(f"产品详细信息检索异常: {e}")
            detail_results = {"error": f"检索失败: {str(e)}"}
        
        state["product_detail_results"] = detail_results
        logger.info(f"检索产品ID {product_id} 的详细信息完成")
        return state
    
    def _respond_product_details(self, state: ConversationState) -> ConversationState:
        """Detail-reply node: compose an answer from the retrieved product
        basic info and document chunks.

        Falls back to a basic-info card when no relevant chunks were found,
        and to an apology message when the LLM call fails.
        """
        detail_results = state.get("product_detail_results", {})
        user_message = state["last_user_message"] or ""
        
        # Retrieval failed upstream: surface the error to the user
        if "error" in detail_results:
            error_message = AIMessage(content=f"抱歉，{detail_results['error']}，请稍后再试。")
            state["messages"].append(error_message)
            return state
        
        basic_info = detail_results.get("product_basic_info", {})
        relevant_chunks = detail_results.get("relevant_chunks", [])
        
        if not relevant_chunks:
            # No matching chunks: answer with the basic product card only
            response_text = f"""很抱歉，关于您询问的产品详情，我暂时只能为您提供以下基本信息：

**产品名称**: {basic_info.get('name', '未知')}
**材质**: {basic_info.get('material', '未知')}
**风格**: {basic_info.get('style', '未知')}
**价格**: ¥{basic_info.get('price', '未知')}
**尺寸**: {basic_info.get('size', '未知')}

我将继续完善我的知识库，感谢您的理解。"""
            
            ai_message = AIMessage(content=response_text)
            state["messages"].append(ai_message)
            return state
        
        # Build the reference material for the LLM prompt
        basic_info_text = f"""产品名称：{basic_info.get('name', '未知')}
材质：{basic_info.get('material', '未知')}
风格：{basic_info.get('style', '未知')}
价格：¥{basic_info.get('price', '未知')}
尺寸：{basic_info.get('size', '未知')}
颜色：{basic_info.get('color', '未知')}
品牌：{basic_info.get('brand', '未知')}
详细尺寸：{basic_info.get('dimensions', '未知')}
特点：{basic_info.get('features', '未知')}
优惠政策：{basic_info.get('promotion_policy', '暂无')}
"""
        
        # The early return above guarantees relevant_chunks is non-empty here,
        # so the old unreachable "暂无相关详细资料" else-branch was removed.
        chunk_items = []
        for i, chunk in enumerate(relevant_chunks, 1):
            chunk_items.append(f"分块{i}：{chunk['chunk_title']}（相似度：{chunk['similarity']:.2%}）")
            chunk_items.append(f"内容：{chunk['chunk_content']}")
            chunk_items.append("")
        chunks_text = "\n".join(chunk_items)
        
        detail_prompt = product_detail_response_prompt.format(
            product_basic_info=basic_info_text,
            user_question=user_message,
            relevant_chunks=chunks_text
        )
        
        # Shared apology text for both LLM error statuses and exceptions
        fallback_text = f"抱歉，在处理您关于「{user_message}」的咨询时遇到了技术问题。请稍后再试，或联系客服获取详细信息。"
        try:
            response = self.llm.chat(detail_prompt)
            
            if response.status_code == 200:
                ai_message = AIMessage(content=response.output.text)
            else:
                ai_message = AIMessage(content=fallback_text)
        except Exception as e:
            logger.error(f"LLM生成产品详情回答异常: {e}")
            ai_message = AIMessage(content=fallback_text)
        
        state["messages"].append(ai_message)
        return state
    
    def _has_conditions(self, state: ConversationState) -> str:
        """Routing predicate after extraction: is there enough to search with?"""
        conditions = state.get("extracted_conditions", {})
        image_path = state.get("uploaded_image_path")
        
        # An uploaded image counts as a condition on its own
        if image_path:
            logger.info(f"🖼️ [调试] 检测到上传图片，将进行图片检索: {image_path}")
            return "has_conditions"
        
        # Otherwise require at least one non-empty extracted text condition
        if conditions and any(conditions.values()):
            logger.info(f"📝 [调试] 检测到文本条件，将进行文本检索: {conditions}")
            return "has_conditions"
        
        logger.info(f"❓ [调试] 未检测到任何条件，将引导用户")
        return "no_conditions"
    
    def _guide_user(self, state: ConversationState) -> ConversationState:
        """Guidance node: generate a personalized follow-up asking the user
        for the details (scene, size, style, material, budget) needed to
        search products.

        Falls back to the canned guidance message when the LLM call fails.
        """
        user_message = state["last_user_message"] or ""
        
        # Build the conversation context from up to the last 3 PREVIOUS turns.
        # BUGFIX: the old code used messages[-3:] when more than 3 messages
        # existed, which duplicated the CURRENT user message into the
        # "history"; slicing messages[:-1] first keeps both branches
        # consistent and always excludes the current turn.
        context_messages = state["messages"][:-1][-3:]
        
        if context_messages:
            context_lines = []
            for msg in context_messages:
                msg_content = msg.content if msg.content else ""
                if isinstance(msg, HumanMessage):
                    context_lines.append(f"用户: {msg_content}")
                elif isinstance(msg, AIMessage):
                    context_lines.append(f"助手: {msg_content[:100]}...")  # truncated to keep the prompt short
            conversation_context = "\n".join(context_lines)
        else:
            conversation_context = "这是用户的首次咨询"
        
        # Build the smart-guidance prompt
        prompt = user_guidance_prompt.format(
            user_input=user_message,
            conversation_context=conversation_context
        )
        
        try:
            response = self.llm.chat(prompt)
            
            if response.status_code == 200:
                guidance_message = response.output.text
                logger.info(f"智能引导生成成功，内容长度: {len(guidance_message)}")
            else:
                # Degrade to the default guidance message
                logger.warning(f"LLM引导生成失败: {response}, 使用默认引导")
                guidance_message = self._get_default_guidance(user_message)
        except Exception as e:
            logger.error(f"智能引导生成异常: {e}, 使用默认引导")
            guidance_message = self._get_default_guidance(user_message)
        
        ai_message = AIMessage(content=guidance_message)
        state["messages"].append(ai_message)
        state["product_search_results"] = []  # make sure stale search results are cleared
        return state
    
    def _get_default_guidance(self, user_input: str) -> str:
        """Return the canned guidance message (fallback when LLM guidance fails).

        Args:
            user_input: The user's message; its first 50 chars are echoed back.
        """
        return f"""感谢您对我们沙发产品的关注！我注意到您提到了"{user_input[:50]}..."，为了给您更精准的推荐，我想了解一些细节：

🏠 **使用场景**：您打算把沙发放在客厅、卧室还是其他空间？
📏 **空间大小**：您的空间大概有多大？需要几人座的沙发？
🎨 **风格偏好**：您更喜欢现代简约、北欧、美式还是其他风格？
🧵 **材质要求**：偏向真皮、布艺还是科技布材质？
💰 **预算范围**：您的预算大概在什么区间？

您也可以上传一张喜欢的沙发图片，我能帮您找到相似的产品。请告诉我您的具体需求，我会为您提供最合适的建议！"""
    
    def _retrieve_products(self, state: ConversationState) -> ConversationState:
        """Product-search node: run text / image / hybrid retrieval.

        Chooses the search mode from the available inputs (uploaded image
        and/or extracted text query) and stores the hit list on the state.
        """
        # ROBUSTNESS: the state key may hold None (the reset default), in
        # which case .get(key, {}) still returns None and the .get() call
        # below would raise AttributeError — coerce to an empty dict instead
        conditions = state.get("extracted_conditions") or {}
        user_message = state["last_user_message"] or ""
        image_path = state.get("uploaded_image_path")
        
        # Prefer the extractor's query text; fall back to the raw message
        search_query = conditions.get("search_query", "") or user_message
        
        # Pick the search mode based on whether an image was uploaded
        if image_path:
            search_type = "hybrid" if search_query.strip() else "image"
            logger.info(f"🖼️ [调试] 使用图片检索，搜索类型: {search_type}")
        else:
            search_type = "text"
            logger.info(f"📝 [调试] 使用文本检索")
        
        # Remove search_query so it is not passed to the DB as a filter column
        filters = {k: v for k, v in conditions.items() if k != "search_query"}
        
        try:
            # Debug: log the retrieval parameters
            logger.info(f"🔍 [调试] 产品检索 - 搜索类型: {search_type}")
            logger.info(f"🔍 [调试] 产品检索 - 文本查询: {search_query}")
            logger.info(f"🔍 [调试] 产品检索 - 图片路径: {image_path}")
            logger.info(f"🔍 [调试] 产品检索 - 过滤条件: {json.dumps(filters, ensure_ascii=False, indent=2) if filters else 'None'}")
            
            if search_type == "text":
                results = self.retrieval_tool.search_by_text(search_query, filters)
            elif search_type == "image":
                results = self.retrieval_tool.search_by_image(image_path, filters)
            elif search_type == "hybrid":
                results = self.retrieval_tool.search_hybrid(search_query, image_path, filters)
            else:
                logger.error(f"不支持的搜索类型: {search_type}")
                results = []
            
            # Debug: log the retrieval results
            logger.info(f"📊 [调试] 产品检索 - 检索到 {len(results)} 个产品")
            if results:
                logger.info(f"📊 [调试] 产品检索 - 前3个结果的相似度: {[r.get('similarity', 0) for r in results[:3]]}")
        except Exception as e:
            logger.error(f"产品检索异常: {e}")
            results = []
        
        state["product_search_results"] = results
        logger.info(f"检索到 {len(results)} 个产品")
        return state
    
    def _recommend_products(self, state: ConversationState) -> ConversationState:
        """Recommendation node: turn search hits into an LLM-written reply."""
        results = state.get("product_search_results", [])
        user_message = state["last_user_message"] or ""
        
        # Format up to three products as prompt context
        if not results:
            option_products = "未找到符合条件的产品"
        else:
            formatted = []
            for rank, product in enumerate(results[:3], 1):
                formatted.append(f"""产品{rank}（ID: {product['id']}）：{product['name']}
- 材质：{product['material']}
- 风格：{product['style']}
- 价格：{product['price']}元
- 尺寸：{product['size']}
- 颜色：{product['color']}
- 品牌：{product['brand']}
- 特色功能：{product['features']}
- 具体尺寸：{product['dimensions']}
- 优惠政策：{json.dumps(product['promotion_policy'], ensure_ascii=False)}
""")
            option_products = "\n\n".join(formatted)
        
        # Ask the LLM to write the recommendation reply
        prompt = recommendation_prompt.format(
            option_products=option_products,
            user_content=user_message
        )
        response = self.llm.chat(prompt)
        
        if response.status_code == 200:
            reply = AIMessage(content=response.output.text)
        else:
            reply = AIMessage(content="抱歉，推荐系统暂时出现问题，请稍后再试。")
        state["messages"].append(reply)
        
        return state
    
    def _route_intent(self, state: ConversationState) -> str:
        """Map the classified intent to the name of the next graph node."""
        routes = {
            IntentType.NORMAL_CHAT.value: "normal_chat",
            IntentType.PRODUCT_RECOMMENDATION.value: "product_recommendation",
            IntentType.PRODUCT_DETAIL_INQUIRY.value: "product_detail_inquiry",
        }
        intent = state.get("intent", IntentType.OTHER.value)
        return routes.get(intent, "other")
    
    def _handle_other(self, state: ConversationState) -> ConversationState:
        """Fallback node for unclassified intents: reply with a capability
        overview. (The unused local ``user_message`` was removed.)"""
        response_text = """我是您的专业沙发产品咨询助手。我可以帮您：

1. 🛋️ 推荐合适的沙发产品
2. 📝 了解不同材质和风格的特点
3. 💰 提供价格和优惠信息
4. 📍 查询售后服务点
5. 🔍 根据图片找相似产品

请告诉我您想了解什么，我会尽力为您提供专业的建议！"""
        
        ai_message = AIMessage(content=response_text)
        state["messages"].append(ai_message)
        return state
    
    def _build_workflow(self) -> StateGraph:
        """Assemble the LangGraph workflow for the conversation agent."""
        workflow = StateGraph(ConversationState)
        
        # Register every node handler (dict preserves registration order)
        node_handlers = {
            "analyze_intent": self._analyze_intent,
            "normal_chat": self._normal_chat,
            "extract_conditions": self._extract_conditions,
            "guide_user": self._guide_user,
            "retrieve_products": self._retrieve_products,
            "recommend_products": self._recommend_products,
            "retrieve_product_details": self._retrieve_product_details,
            "respond_product_details": self._respond_product_details,
            "handle_other": self._handle_other,
        }
        for node_name, handler in node_handlers.items():
            workflow.add_node(node_name, handler)
        
        # Entry point: START flows straight into intent analysis
        workflow.add_edge(START, "analyze_intent")
        
        # Branch on the classified intent
        workflow.add_conditional_edges(
            "analyze_intent",
            self._route_intent,
            {
                "normal_chat": "normal_chat",
                "product_recommendation": "extract_conditions",
                "product_detail_inquiry": "retrieve_product_details",
                "other": "handle_other"
            }
        )
        
        # Recommendation path: only search when usable conditions exist
        workflow.add_conditional_edges(
            "extract_conditions",
            self._has_conditions,
            {
                "has_conditions": "retrieve_products",
                "no_conditions": "guide_user"
            }
        )
        
        workflow.add_edge("retrieve_products", "recommend_products")
        workflow.add_edge("retrieve_product_details", "respond_product_details")
        
        # Every reply-producing node terminates the graph
        for terminal in ("normal_chat", "guide_user", "recommend_products",
                         "respond_product_details", "handle_other"):
            workflow.add_edge(terminal, END)
        
        return workflow

    def chat_stream(self, user_input: str, image_path: Optional[str] = None):
        """
        Streaming chat interface that yields the AI reply in real time.
        
        Args:
            user_input: The user's message.
            image_path: Path of an uploaded image (optional).
            
        Yields:
            Dicts of the form ``{"type": "intent"|"content", "content": ...}``:
            one intent event, then the reply streamed character by character.
        """
        # When an image is attached, embed its path into the user message so
        # the LLM-based intent classifier can see it
        if image_path:
            enhanced_content = f"{user_input}\n\n[用户上传了一张图片: {image_path}]"
            user_message = HumanMessage(content=enhanced_content)
            self._internal_state["uploaded_image_path"] = image_path
        else:
            user_message = HumanMessage(content=user_input)
            self._internal_state["uploaded_image_path"] = None
        
        # Record the turn in the internal state
        self._internal_state["messages"].append(user_message)
        self._internal_state["last_user_message"] = user_input

        try:
            # (The redundant function-local `import asyncio` was removed;
            # the module-level import is used instead.)

            async def _stream_response():
                intent_sent = False
                current_state = self._internal_state.copy()

                # Run the graph and fold node updates back into current_state
                async for chunk in self.app.astream(current_state, stream_mode="updates"):

                    # Keep the local snapshot in sync with every node result
                    for node_name, node_result in chunk.items():
                        current_state.update(node_result)
                    
                    # Emit the intent exactly once, as soon as it is known
                    if not intent_sent:
                        for node_name, node_result in chunk.items():
                            if node_name == 'analyze_intent' and "intent" in node_result:
                                yield {"type": "intent", "content": node_result["intent"]}
                                intent_sent = True
                                break
                    
                    # Stream any newly generated AI message
                    for node_name, node_result in chunk.items():
                        if "messages" in node_result:
                            messages = node_result["messages"]

                            # Only a trailing AI message is new output
                            if messages and isinstance(messages[-1], AIMessage):
                                # Emit the reply one character at a time
                                for char in messages[-1].content:
                                    yield {"type": "content", "content": char}
                                    await asyncio.sleep(0.01)  # pace the output
                                    
                            break
                
                # After the graph finishes, sync the final state back
                self._internal_state.update(current_state)
                logger.info("✅ Graph 执行完成，状态已同步到内部状态")
                        
            # Bridge between synchronous and asynchronous callers
            try:
                loop = asyncio.get_running_loop()
            except RuntimeError:
                loop = None
            
            if loop is None:
                # No running loop: create one and drive the async generator
                async def _sync_wrapper():
                    async for item in _stream_response():
                        yield item
                
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)
                try:
                    generator = _sync_wrapper()
                    while True:
                        try:
                            item = loop.run_until_complete(generator.__anext__())
                            yield item
                        except StopAsyncIteration:
                            break
                finally:
                    loop.close()
            else:
                # Already inside an event loop: drive the generator on a
                # separate, private loop instead of re-entering the running one
                def run_async_generator():
                    new_loop = asyncio.new_event_loop()
                    asyncio.set_event_loop(new_loop)
                    try:
                        async def _wrapper():
                            async for item in _stream_response():
                                yield item
                        
                        generator = _wrapper()
                        while True:
                            try:
                                item = new_loop.run_until_complete(generator.__anext__())
                                yield item
                            except StopAsyncIteration:
                                break
                    finally:
                        new_loop.close()
                
                yield from run_async_generator()
                    
        except Exception as e:
            logger.error(f"流式对话处理异常: {e}")
            error_msg = "抱歉，系统出现了问题，请稍后再试。"
            yield {"type": "intent", "content": "other"}
            for char in error_msg:
                yield {"type": "content", "content": char}
                time.sleep(0.02)
