import json
import time
import requests
from typing import List, Dict, Any, Optional, Generator
from config import config
from retriever import retriever

class ChatManager:
    """Conversation manager: in-memory chat history plus LLM access.

    History is kept in memory, grouped by article title (the empty string
    acts as the default bucket).  LLM calls go through an OpenAI-compatible
    ``/chat/completions`` endpoint configured via ``config``, either as a
    blocking call or as a Server-Sent-Events (SSE) stream.
    """

    def __init__(self):
        # Conversation history grouped by article title ("" = default bucket).
        self.conversation_history: Dict[str, List[Dict[str, Any]]] = {}
        # Maximum number of stored *messages* per bucket (e.g. 12 messages = 6 rounds).
        self.max_history = config.MAX_HISTORY

    def add_to_history(self, role: str, content: str, article_title: str = "", metadata: Optional[Dict[str, Any]] = None):
        """Append one message to the history bucket for ``article_title``.

        Args:
            role: ``"user"`` or ``"assistant"``.
            content: Message text.
            article_title: History bucket key ("" = default bucket).
            metadata: Optional extra info stored alongside the message.
        """
        bucket = self.conversation_history.setdefault(article_title, [])
        bucket.append({
            "role": role,
            "content": content,
            "timestamp": time.time(),
            "metadata": metadata or {},
        })

        # Trim so the bucket never exceeds max_history messages.
        if len(bucket) > self.max_history:
            self.conversation_history[article_title] = bucket[-self.max_history:]

    def get_recent_history(self, article_title: str = "", max_messages: int = 0) -> List[Dict[str, Any]]:
        """Return the last ``max_messages`` messages for the bucket (0 = all)."""
        history = self.conversation_history.get(article_title)
        if not history:
            return []

        if max_messages == 0:
            max_messages = len(history)

        return history[-max_messages:]

    def clear_history(self, article_title: str = ""):
        """Clear one bucket's history, or all history when no title is given."""
        if article_title:
            if article_title in self.conversation_history:
                self.conversation_history[article_title] = []
        else:
            self.conversation_history = {}

    def format_history_for_llm(self, article_title: str = "", max_messages: int = 0) -> str:
        """Format recent history as plain "role: content" lines for the prompt."""
        recent_history = self.get_recent_history(article_title, max_messages)

        print(f"📚 格式化历史记录:")
        print(f"   文章标题: '{article_title}'")
        print(f"   请求消息数: {max_messages}")
        print(f"   实际获取消息数: {len(recent_history)}")

        if not recent_history:
            print("   无历史记录")
            return ""

        formatted_history = []
        for i, msg in enumerate(recent_history):
            role = "用户" if msg["role"] == "user" else "助手"
            formatted_history.append(f"{role}: {msg['content']}")
            print(f"   消息{i+1}: {role} - {msg['content'][:50]}...")

        result = "\n".join(formatted_history)
        print(f"   格式化后长度: {len(result)} 字符")
        return result

    def call_llm_stream(self, messages: List[Dict[str, Any]]) -> Generator[str, None, None]:
        """Call the LLM with streaming enabled; yield SSE-formatted chunks.

        Yields ``data: {...}\\n\\n`` strings carrying either ``content``,
        ``error`` or ``done`` keys.
        """
        headers = {
            "Authorization": f"Bearer {config.ONEAPI_KEY}",
            "Content-Type": "application/json"
        }

        # NOTE: the request timeout belongs to requests.post below, not to the
        # JSON payload — "timeout" is not a Chat Completions parameter.
        data = {
            "model": config.LLM_MODEL,
            "messages": messages,
            "stream": True,
            "max_tokens": config.MAX_TOKENS,
            "temperature": config.TEMPERATURE
        }

        # Never log the real API key / Authorization header.
        redacted_headers = dict(headers, Authorization="Bearer ***")
        print(f"🔍 发送LLM请求:")
        print(f"   URL: {config.ONEAPI_BASE_URL}/chat/completions")
        print(f"   模型: {config.LLM_MODEL}")
        print(f"   消息数量: {len(messages)}")
        print(f"   最大令牌: {config.MAX_TOKENS}")
        print(f"   🔑 API Key: {config.ONEAPI_KEY[:6]}***")
        print(f"   📋 请求头: {redacted_headers}")
        print(f"   📦 请求数据: {json.dumps(data, ensure_ascii=False, indent=2)}")

        try:
            response = requests.post(
                f"{config.ONEAPI_BASE_URL}/chat/completions",
                headers=headers,
                json=data,
                stream=True,
                timeout=60  # 60-second request timeout
            )

            print(f"📡 响应状态码: {response.status_code}")
            print(f"📋 响应头: {dict(response.headers)}")

            if response.status_code != 200:
                error_text = response.text
                print(f"❌ LLM请求失败: HTTP {response.status_code}")
                print(f"   错误详情: {error_text}")
                error_msg = f"LLM请求失败: HTTP {response.status_code} - {error_text}"
                yield f"data: {json.dumps({'error': error_msg})}\n\n"
                return

            print(f"✅ LLM请求成功，开始处理流式响应...")

            for line in response.iter_lines():
                if not line:
                    continue
                line = line.decode('utf-8')
                if not line.startswith('data: '):
                    continue
                data_str = line[6:]  # strip the 'data: ' prefix
                if data_str.strip() == '[DONE]':
                    print(f"✅ 流式响应完成")
                    yield f"data: {json.dumps({'done': True})}\n\n"
                    break
                try:
                    chunk = json.loads(data_str)
                    if 'choices' in chunk and len(chunk['choices']) > 0:
                        delta = chunk['choices'][0].get('delta', {})
                        if 'content' in delta:
                            yield f"data: {json.dumps({'content': delta['content']})}\n\n"
                except json.JSONDecodeError as e:
                    # Skip malformed SSE chunks rather than aborting the stream.
                    print(f"⚠️ 解析流式数据失败: {e}")
                    continue

        except requests.exceptions.Timeout:
            print(f"⏰ LLM请求超时")
            error_msg = "LLM请求超时，请稍后重试"
            yield f"data: {json.dumps({'error': error_msg})}\n\n"
        except requests.exceptions.ConnectionError as e:
            print(f"🔌 连接错误: {e}")
            error_msg = f"连接OneAPI服务失败: {str(e)}"
            yield f"data: {json.dumps({'error': error_msg})}\n\n"
        except Exception as e:
            print(f"❌ 流式请求异常: {e}")
            error_msg = f"流式请求异常: {str(e)}"
            yield f"data: {json.dumps({'error': error_msg})}\n\n"

    def call_llm(self, messages: List[Dict[str, Any]]) -> str:
        """Call the LLM without streaming; return the reply text or an error string."""
        headers = {
            "Authorization": f"Bearer {config.ONEAPI_KEY}",
            "Content-Type": "application/json"
        }

        data = {
            "model": config.LLM_MODEL,
            "messages": messages,
            "stream": False,
            "max_tokens": config.MAX_TOKENS,
            "temperature": config.TEMPERATURE
        }

        # Never log the real API key / Authorization header.
        redacted_headers = dict(headers, Authorization="Bearer ***")
        print(f"🔍 发送普通LLM请求:")
        print(f"   URL: {config.ONEAPI_BASE_URL}/chat/completions")
        print(f"   模型: {config.LLM_MODEL}")
        print(f"   消息数量: {len(messages)}")
        print(f"   🔑 API Key: {config.ONEAPI_KEY[:6]}***")
        print(f"   📋 请求头: {redacted_headers}")
        print(f"   📦 请求数据: {json.dumps(data, ensure_ascii=False, indent=2)}")

        try:
            response = requests.post(
                f"{config.ONEAPI_BASE_URL}/chat/completions",
                headers=headers,
                json=data,
                timeout=30
            )

            print(f"📡 响应状态码: {response.status_code}")
            print(f"📋 响应头: {dict(response.headers)}")

            if response.status_code == 200:
                result = response.json()
                content = result["choices"][0]["message"]["content"]
                print(f"✅ 普通LLM请求成功")
                print(f"📝 回复内容: {content[:200]}...")
                return content
            else:
                error_text = response.text
                print(f"❌ 普通LLM请求失败: HTTP {response.status_code}")
                print(f"   错误详情: {error_text}")
                return f"LLM请求失败: HTTP {response.status_code} - {error_text}"

        except requests.exceptions.Timeout:
            print(f"⏰ 普通LLM请求超时")
            return "LLM请求超时，请稍后重试"
        except requests.exceptions.ConnectionError as e:
            print(f"🔌 连接错误: {e}")
            return f"连接OneAPI服务失败: {str(e)}"
        except Exception as e:
            print(f"❌ 普通LLM请求异常: {e}")
            return f"LLM请求异常: {str(e)}"

    def generate_response(self, query: str, search_type: str = "hybrid", article_title: str = "", stream: bool = True) -> Generator[str, None, None]:
        """Retrieve context, build the prompt, call the LLM and yield the reply.

        In stream mode yields SSE chunks; otherwise yields the full reply once.
        """
        print(f"\n🔍 开始生成回复:")
        print(f"   查询: '{query}'")
        print(f"   检索类型: {search_type}")
        print(f"   文章标题: '{article_title}'")
        print(f"   流式模式: {stream}")

        # Retrieval step.
        search_results = retriever.search(query, search_type, article_title)
        print(f"   检索结果数量: {len(search_results)}")

        context = retriever.get_context_from_results(search_results)
        print(f"   上下文长度: {len(context)} 字符")
        print(f"   上下文预览: {context[:200]}...")

        # Bail out early when retrieval produced nothing usable.
        if not context.strip():
            print("⚠️ 警告: 上下文为空，无法生成回复")
            error_msg = "未找到相关文档内容，请尝试其他问题或检查文章标题"
            if stream:
                yield f"data: {json.dumps({'error': error_msg})}\n\n"
            else:
                yield error_msg
            return

        # Pull recent conversation history (up to max_history messages).
        history_text = self.format_history_for_llm(article_title, max_messages=self.max_history)
        print(f"   对话历史长度: {len(history_text)} 字符")
        if history_text:
            print(f"   对话历史预览: {history_text[:200]}...")

        # Build the prompt; the history section is included only when present.
        prompt_parts = [
            "使用 <Data></Data> 标记中的内容作为你的知识:\n\n<Data>\n",
            context,
            "\n</Data>\n\n",
        ]
        if history_text:
            prompt_parts.append(f"对话历史:\n{history_text}\n\n")
        prompt_parts.append(
            "回答要求：\n"
            "- 如果你不清楚答案，你需要澄清。\n"
            "- 避免提及你是从 <Data></Data> 获取的知识。\n"
            "- 保持答案与 <Data></Data> 中描述的一致。\n"
            "- 使用 Markdown 语法优化回答格式。\n"
            "- 使用与问题相同的语言回答。\n\n"
            f"问题:\n{query}"
        )
        prompt = "".join(prompt_parts)

        print(f"   Prompt长度: {len(prompt)} 字符")
        print(f"   Prompt预览: {prompt[:300]}...")

        messages = [
            {"role": "user", "content": prompt}
        ]

        # Record the user turn before calling the LLM.
        self.add_to_history("user", query, article_title, {"search_results": len(search_results), "article_title": article_title})

        print(f"✅ 准备调用LLM...")
        print(f"   🔧 OneAPI配置:")
        print(f"      URL: {config.ONEAPI_BASE_URL}")
        print(f"      Key: {config.ONEAPI_KEY[:6]}***")
        print(f"      模型: {config.LLM_MODEL}")

        if stream:
            # Streaming: accumulate content so the assistant turn can be saved.
            full_response = ""
            print(f"🚀 开始流式LLM调用...")
            for chunk in self.call_llm_stream(messages):
                if chunk.startswith('data: '):
                    try:
                        data = json.loads(chunk[6:])
                        if 'content' in data:
                            full_response += data['content']
                            yield chunk
                        elif 'error' in data:
                            print(f"❌ LLM返回错误: {data['error']}")
                            yield chunk
                            return
                        elif 'done' in data:
                            # Persist the assistant turn on normal completion.
                            self.add_to_history("assistant", full_response, article_title)
                            print(f"✅ 流式响应完成，总长度: {len(full_response)} 字符")
                            yield chunk
                            return
                    except json.JSONDecodeError:
                        continue
            # Stream ended without an explicit 'done' event (e.g. dropped
            # connection): still persist whatever partial content arrived.
            if full_response:
                self.add_to_history("assistant", full_response, article_title)
        else:
            # Blocking call.
            print(f"🚀 开始普通LLM调用...")
            response = self.call_llm(messages)
            self.add_to_history("assistant", response, article_title)
            print(f"✅ 普通响应完成，长度: {len(response)} 字符")
            yield response

    def get_conversation_summary(self) -> Dict[str, Any]:
        """Return aggregate counts over all buckets plus the default bucket's recent messages."""
        total_messages = sum(len(messages) for messages in self.conversation_history.values())
        user_messages = sum(len([msg for msg in messages if msg["role"] == "user"]) for messages in self.conversation_history.values())
        assistant_messages = sum(len([msg for msg in messages if msg["role"] == "assistant"]) for messages in self.conversation_history.values())
        recent_messages = self.get_recent_history(max_messages=6)  # last 3 rounds of the default bucket
        return {
            "total_messages": total_messages,
            "user_messages": user_messages,
            "assistant_messages": assistant_messages,
            "recent_messages": recent_messages
        }

# Global ChatManager instance shared by the rest of the application.
chat_manager = ChatManager() 