#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
私人RAG智能对话机器人
结合Milvus向量检索和Ollama本地大语言模型
实现基于文件内容的智能问答
"""

import json
import time
import requests
from datetime import datetime
from typing import List, Dict, Optional, Tuple
from dataclasses import dataclass

from .QueuedFileSearchTools import QueuedFileSearchTool


@dataclass
class ChatMessage:
    """One message in the conversation transcript."""
    role: str  # 'user' or 'assistant'
    content: str
    timestamp: float  # Unix epoch seconds (time.time())
    sources: Optional[List[Dict]] = None  # files cited by an assistant reply; None for user messages


@dataclass
class RAGContext:
    """Bundle of artifacts from a single RAG exchange.

    NOTE(review): not referenced anywhere in this file chunk -- field
    meanings are inferred from their names; confirm against callers.
    """
    query: str
    retrieved_files: List[Dict]
    context_text: str
    response: str
    sources: List[str]


class OllamaClient:
    """HTTP client for a local Ollama server (/api/tags, /api/generate).

    All network errors are caught and reported to stdout; callers receive
    an empty/fallback value instead of an exception.
    """

    # requests.Session has no `timeout` attribute -- the original
    # `self.session.timeout = 180` assignment was a silent no-op, leaving
    # list_models() with no timeout at all.  Timeouts are now passed
    # explicitly on every request.
    _TAGS_TIMEOUT = 180       # seconds for the lightweight model-list call
    _GENERATE_TIMEOUT = 300   # generation on large local models can be slow

    def __init__(self, base_url: str = "http://172.26.32.1:11434"):
        """
        Args:
            base_url: Base URL of the Ollama HTTP API; any trailing slash
                is stripped so endpoint paths can be appended safely.
        """
        self.base_url = base_url.rstrip('/')
        self.session = requests.Session()

    def list_models(self) -> List[Dict]:
        """Return the models known to the server, or [] on any failure."""
        try:
            response = self.session.get(
                f"{self.base_url}/api/tags",
                timeout=self._TAGS_TIMEOUT,
            )
            response.raise_for_status()
            return response.json().get('models', [])
        except Exception as e:
            print(f"❌ 获取模型列表失败: {e}")
            return []

    def generate(self, model: str, prompt: str, system: str = None,
                 stream: bool = False, options: Dict = None) -> str:
        """Generate a completion and return the full text.

        Args:
            model: Ollama model name.
            prompt: User prompt text.
            system: Optional system prompt; omitted from the payload when falsy.
            stream: When True the server's streamed chunks are drained and
                concatenated before returning.
            options: Extra Ollama generation options (temperature, etc.).

        Returns:
            The generated text, or a Chinese error message on failure.
        """
        try:
            payload = {
                "model": model,
                "prompt": prompt,
                "stream": stream,
                "options": options or {}
            }
            if system:
                payload["system"] = system

            response = self.session.post(
                f"{self.base_url}/api/generate",
                json=payload,
                timeout=self._GENERATE_TIMEOUT
            )
            response.raise_for_status()

            if stream:
                return self._handle_stream_response(response)
            result = response.json()
            return result.get('response', '').strip()

        except Exception as e:
            print(f"❌ 生成响应失败: {e}")
            return f"抱歉，生成响应时出现错误: {str(e)}"

    def generate_stream(self, model: str, prompt: str, system: str = None,
                        options: Dict = None):
        """Yield generated text chunks as they arrive (generator).

        On any error a single Chinese error-message chunk is yielded.
        """
        try:
            payload = {
                "model": model,
                "prompt": prompt,
                "stream": True,
                "options": options or {}
            }
            # Match generate(): omit the key entirely instead of sending an
            # explicit JSON null when no system prompt was provided.
            if system:
                payload["system"] = system

            response = self.session.post(
                f"{self.base_url}/api/generate",
                json=payload,
                stream=True,
                timeout=self._GENERATE_TIMEOUT
            )
            response.raise_for_status()

            for line in response.iter_lines():
                if not line:
                    continue
                try:
                    data = json.loads(line.decode('utf-8'))
                except json.JSONDecodeError:
                    continue  # skip malformed/partial lines
                if 'response' in data:
                    yield data['response']
                if data.get('done', False):
                    break

        except Exception as e:
            yield f"生成响应时出现错误: {str(e)}"

    def _handle_stream_response(self, response) -> str:
        """Drain a streaming /api/generate response into one string.

        Stops at the first line whose `done` flag is true; malformed JSON
        lines are skipped.
        """
        full_response = ""
        for line in response.iter_lines():
            if not line:
                continue
            try:
                data = json.loads(line.decode('utf-8'))
            except json.JSONDecodeError:
                continue
            if 'response' in data:
                full_response += data['response']
            if data.get('done', False):
                break
        return full_response.strip()

    def test_connection(self) -> bool:
        """Return True when the server responds with at least one model."""
        try:
            return len(self.list_models()) > 0
        except Exception:  # list_models already swallows errors; defensive only
            return False


class RAGChatBot:
    """Retrieval-augmented chat bot: Milvus file search + local Ollama LLM.

    Typical use: construct, call initialize() once, then chat() per turn.
    """

    def __init__(self,
                 ollama_url: str = "http://172.26.32.1:11434",
                 milvus_collection: str = "workspace_files",
                 default_model: str = "deepseek-r1:14b"):
        """
        Initialize the RAG chat bot.

        Args:
            ollama_url: Base URL of the Ollama HTTP service.
            milvus_collection: Milvus collection holding the indexed files.
            default_model: Model to use; initialize() falls back to the
                first available model if this one is not installed.
        """
        # LLM backend
        self.ollama_client = OllamaClient(ollama_url)
        self.current_model = default_model

        # Vector search over the indexed workspace files
        self.search_tool = QueuedFileSearchTool(collection_name=milvus_collection)

        # Conversation transcript (user and assistant messages interleaved)
        self.chat_history: List[ChatMessage] = []

        # RAG tuning knobs -- kept small to shorten prompts and speed up
        # generation on local models.
        self.max_context_files = 3       # max files retrieved per query
        self.max_context_length = 2000   # max characters of built context
        self.similarity_threshold = 0.5  # drop weakly-matching results

        # Short system prompt to reduce latency
        self.system_prompt = """你是文件搜索助手。基于提供的文件列表回答用户问题，简洁明了，标注文件来源。使用中文回答。"""

    def initialize(self) -> bool:
        """Check Ollama and Milvus connectivity and pick a usable model.

        Returns:
            True when both backends are reachable and a model is selected.
        """
        print("🚀 初始化RAG智能对话机器人...")

        # Ollama must be reachable...
        if not self.ollama_client.test_connection():
            print("❌ 无法连接到Ollama服务")
            return False

        # ...and expose at least one model.
        models = self.ollama_client.list_models()
        if not models:
            print("❌ 未找到可用的Ollama模型")
            return False

        print(f"✅ Ollama连接成功，找到 {len(models)} 个模型:")
        for model in models[:3]:  # show at most the first three
            print(f"   - {model['name']}")

        # Fall back to the first installed model when the configured
        # default is missing.  (models is non-empty here, so model_names
        # is too -- the original dead "no models" branch was removed.)
        model_names = [m['name'] for m in models]
        if self.current_model not in model_names:
            self.current_model = model_names[0]
            print(f"⚠️ 切换到可用模型: {self.current_model}")

        print(f"使用模型: {self.current_model}")

        # Milvus must answer a stats query.
        try:
            stats = self.search_tool.get_collection_stats()
            print(f"✅ Milvus连接成功，集合中有 {stats['total_entities']:,} 条记录")
        except Exception as e:
            print(f"❌ Milvus连接失败: {e}")
            return False

        print("🎉 RAG系统初始化完成！")
        return True

    def search_files(self, query: str, top_k: Optional[int] = None) -> List[Dict]:
        """Search the vector index and keep only sufficiently similar hits.

        Args:
            query: Natural-language search query.
            top_k: Max results to fetch; defaults to self.max_context_files.

        Returns:
            Hits with similarity_score >= self.similarity_threshold,
            or [] on any search error.
        """
        if top_k is None:
            top_k = self.max_context_files

        try:
            results = self.search_tool.search_files(query, top_k=top_k)
            # Filter out weakly-matching results.
            return [
                r for r in results
                if r['similarity_score'] >= self.similarity_threshold
            ]
        except Exception as e:
            print(f"❌ 文件搜索失败: {e}")
            return []

    def build_context(self, query: str, retrieved_files: List[Dict]) -> str:
        """Assemble a compact prompt context from retrieved file metadata.

        Only file name/type/score lines are included (no file contents);
        files are dropped once max_context_length would be exceeded.
        """
        if not retrieved_files:
            return "未找到相关文件内容。"

        context_parts = [f"问题: {query}\n\n相关文件:\n"]
        current_length = len(context_parts[0])

        for i, file_info in enumerate(retrieved_files, 1):
            file_context = f"{i}. {file_info['file_name']} ({file_info['file_type']}) - 相似度: {file_info['similarity_score']:.2f}\n"

            # Stop before the context grows past the configured budget.
            if current_length + len(file_context) > self.max_context_length:
                break

            context_parts.append(file_context)
            current_length += len(file_context)

        context_parts.append("\n请基于以上文件回答问题。")
        return "".join(context_parts)

    def generate_response(self, query: str, context: str) -> str:
        """Generate a complete (non-streaming) answer from the context."""
        prompt = f"{context}\n\n请基于以上文件信息回答用户问题，并标注信息来源："

        try:
            return self.ollama_client.generate(
                model=self.current_model,
                prompt=prompt,
                system=self.system_prompt,
                options={
                    "temperature": 0.3,   # low creativity -> faster, more literal
                    "top_p": 0.8,
                    "num_predict": 1024,  # cap output length
                    "num_ctx": 4096       # context window size
                }
            )
        except Exception as e:
            return f"生成回答时出现错误: {str(e)}"

    def generate_response_stream(self, query: str, context: str):
        """Yield answer chunks as the model produces them (generator)."""
        prompt = f"{context}\n\n请基于以上文件信息回答用户问题，并标注信息来源："

        try:
            yield from self.ollama_client.generate_stream(
                model=self.current_model,
                prompt=prompt,
                system=self.system_prompt,
                options={
                    "temperature": 0.7,
                    "top_p": 0.9,
                    # Ollama's output-length option is num_predict; the
                    # previous "max_tokens" key was silently ignored.
                    "num_predict": 2000
                }
            )
        except Exception as e:
            yield f"生成回答时出现错误: {str(e)}"

    def chat(self, user_message: str, stream: bool = False) -> Tuple[str, List[Dict]]:
        """Answer a user message with retrieval-augmented generation.

        Args:
            user_message: The user's question.
            stream: NOTE(review): when True AND files are retrieved, this
                returns a generator of (chunk, retrieved_files) tuples
                instead of the documented tuple -- callers must branch on
                `stream`.  Preserved as-is for backward compatibility.

        Returns:
            (answer_text, cited_files) in the non-streaming case; errors
            are returned as the answer text rather than raised.
        """
        # Fallbacks so every non-streaming path returns a valid tuple.
        response = "处理中出现未知错误"
        sources = []

        try:
            start_time = time.time()

            print("🔍 搜索相关文件...")
            # 1. Retrieve candidate files.
            retrieved_files = self.search_files(user_message)

            if not retrieved_files:
                response = "抱歉，我在您的文件中没有找到相关信息来回答这个问题。请尝试使用不同的关键词。"
                sources = []
                print("⚠️ 未找到相关文件")
            else:
                print(f"📄 找到 {len(retrieved_files)} 个相关文件")

                # 2. Build the prompt context.
                context = self.build_context(user_message, retrieved_files)
                print(f"📝 构建上下文完成，长度: {len(context)} 字符")

                # 3. Generate the answer.
                print("🤖 正在生成回答...")
                if stream:
                    # Streaming: hand back a generator; history is recorded
                    # once the stream is exhausted (or fails).
                    def stream_generator():
                        response_text = ""
                        try:
                            for chunk in self.generate_response_stream(user_message, context):
                                response_text += chunk
                                yield chunk, retrieved_files
                        except Exception as e:
                            yield f"流式生成错误: {str(e)}", []
                        finally:
                            self._record_chat_history(user_message, response_text, retrieved_files)

                    return stream_generator()
                else:
                    response = self.generate_response(user_message, context)
                    sources = retrieved_files
                    print(f"✅ 生成回答完成，长度: {len(response)} 字符")

            # 4. Record the exchange (non-streaming paths only).
            self._record_chat_history(user_message, response, sources)

            elapsed = time.time() - start_time
            print(f"✅ 回答生成完成 (耗时 {elapsed:.2f}秒)")

            # Never hand back an empty answer.
            if not response or response.strip() == "":
                response = "处理完成，但没有生成有效回答"

            return response, sources

        except Exception as e:
            error_msg = f"处理消息时出现错误: {str(e)}"
            print(f"❌ {error_msg}")

            # Best-effort history record; never let logging mask the error.
            try:
                self._record_chat_history(user_message, error_msg, [])
            except Exception:
                pass

            # Keep the (text, sources) contract even on failure.
            return error_msg, []

    def _record_chat_history(self, user_message: str, response: str, sources: List[Dict]):
        """Append a user/assistant message pair to the transcript."""
        try:
            user_msg = ChatMessage(
                role="user",
                content=user_message,
                timestamp=time.time()
            )
            assistant_msg = ChatMessage(
                role="assistant",
                content=response,
                timestamp=time.time(),
                sources=sources
            )
            self.chat_history.extend([user_msg, assistant_msg])
        except Exception as e:
            print(f"⚠️ 记录对话历史失败: {e}")

    def get_chat_history(self) -> List[ChatMessage]:
        """Return a shallow copy of the transcript."""
        return self.chat_history.copy()

    def clear_history(self):
        """Drop the whole transcript."""
        self.chat_history.clear()
        print("🗑️ 对话历史已清空")

    def switch_model(self, model_name: str) -> bool:
        """Switch to another installed Ollama model.

        Returns:
            True when the model exists on the server, else False.
        """
        model_names = self.get_available_models()

        if model_name in model_names:
            self.current_model = model_name
            print(f"🔄 已切换到模型: {model_name}")
            return True

        print(f"❌ 模型 {model_name} 不存在")
        print(f"可用模型: {', '.join(model_names)}")
        return False

    def get_available_models(self) -> List[str]:
        """Names of all models the Ollama server reports."""
        return [m['name'] for m in self.ollama_client.list_models()]

    def update_config(self, **kwargs):
        """Set existing attributes by keyword; unknown keys are reported.

        NOTE(review): this can overwrite ANY attribute (including
        current_model or search_tool) -- no type/range validation is done.
        """
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
                print(f"✅ 配置已更新: {key} = {value}")
            else:
                print(f"⚠️ 未知配置项: {key}")

    def get_stats(self) -> Dict:
        """Summarize current model, transcript size, index size and config."""
        milvus_stats = self.search_tool.get_collection_stats()

        return {
            "current_model": self.current_model,
            "chat_messages": len(self.chat_history),
            "milvus_entities": milvus_stats['total_entities'],
            "config": {
                "max_context_files": self.max_context_files,
                "max_context_length": self.max_context_length,
                "similarity_threshold": self.similarity_threshold
            }
        }