import json
import logging
import re
import time
from typing import Any, Callable, Dict, List, Optional

import requests

from education.age_group import AgeGroup
from education.education_utils import EducationUtils

class OllamaClient:
    """Ollama API客户端"""
    def __init__(self, host: str = "localhost", port: int = 11434):
        self.base_url = f"http://{host}:{port}"
        self.session = requests.Session()
        self.logger = logging.getLogger(__name__)
        self.edu_utils = EducationUtils()
        self.current_age_group = AgeGroup.MIDDLE_PRIMARY
        self.show_thinking = False  # 默认关闭思索模式
        
        # 测试连接
        try:
            self.test_connection()
            self.logger.info(f"成功连接到 Ollama 服务器: {self.base_url}")
        except Exception as e:
            self.logger.error(f"连接 Ollama 服务器失败: {str(e)}")
            raise ConnectionError(f"连接失败，请检查网络连接: {str(e)}")
    
    def _make_request(self, method: str, endpoint: str, **kwargs) -> Any:
        """发送API请求
        
        Args:
            method: 请求方法 (GET, POST等)
            endpoint: API端点
            **kwargs: 请求参数
            
        Returns:
            Any: 如果stream=True返回Response对象，否则返回解析后的JSON数据
            
        Raises:
            ConnectionError: 连接失败时抛出
        """
        url = f"{self.base_url}/api/{endpoint}"
        try:
            response = requests.request(method, url, **kwargs)
            response.raise_for_status()
            
            # 如果是流式请求，返回response对象
            if kwargs.get("stream", False):
                return response
                
            # 否则返回JSON数据
            return response.json()
            
        except requests.exceptions.RequestException as e:
            self.logger.error(f"API请求失败: {str(e)}")
            raise ConnectionError(f"无法连接到 Ollama 服务器: {str(e)}")
    
    def test_connection(self) -> bool:
        """测试服务器连接"""
        try:
            # 测试版本接口
            version_data = self._make_request("GET", "version", timeout=5)
            if not version_data:
                raise ConnectionError("服务器响应异常")
            
            # 测试模型列表接口
            models_data = self._make_request("GET", "tags", timeout=5)
            if not models_data or "models" not in models_data:
                raise ConnectionError("无法获取模型列表")
            
            # 检查是否有可用模型
            if not models_data["models"]:
                self.logger.warning("服务器上没有可用模型")
            
            return True
            
        except Exception as e:
            self.logger.error(f"连接测试失败: {str(e)}")
            raise ConnectionError(f"无法连接到服务器: {str(e)}")
    
    def list_models(self) -> List[Dict]:
        """获取可用模型列表"""
        try:
            response = self._make_request("GET", "tags")
            return [
                {
                    "name": model["name"],
                    "size": model.get("size", "未知"),
                    "modified_at": model.get("modified_at", "未知")
                }
                for model in response.get("models", [])
            ]
        except Exception as e:
            self.logger.error(f"获取模型列表失败: {str(e)}")
            return []
    
    def delete_model(self, model_name: str) -> bool:
        """删除模型"""
        try:
            self._make_request("DELETE", "delete", json={"name": model_name})
            return True
        except requests.exceptions.RequestException:
            return False
    
    def pull_model(self, model_name: str) -> bool:
        """下载模型"""
        try:
            data = {"name": model_name}
            self._make_request("POST", "pull", json=data)
            return True
        except Exception as e:
            self.logger.error(f"下载模型失败: {str(e)}")
            return False
    
    def get_model_info(self, model_name: str) -> Optional[dict]:
        """获取模型信息"""
        try:
            response = self._make_request("POST", "show", json={"name": model_name})
            return response.json()
        except requests.exceptions.RequestException:
            return None
    
    def handle_inappropriate_content(self, message: str) -> str:
        """处理不当内容"""
        if self.current_age_group == AgeGroup.LOWER_PRIMARY:
            return "这个问题对你来说还太早了哦！让我们聊一些有趣的话题吧！😊\n\n比如：\n- 你最喜欢的动物是什么？🐼\n- 今天学到了什么新知识？📚\n- 想玩什么有趣的游戏？🎮"
        elif self.current_age_group == AgeGroup.MIDDLE_PRIMARY:
            return "这个话题可能不太适合现在的你哦。不如我们来聊聊其他有意思的事情？😊"
        else:
            return "这个话题可能不太合适，建议你和父母或老师聊一聊。我们可以讨论一些更有趣的话题！"

    def set_thinking_mode(self, enabled: bool):
        """设置思索模式开关"""
        self.show_thinking = enabled
        self.logger.info(f"思索模式已{'开启' if enabled else '关闭'}")

    def _filter_response(self, content: str) -> str:
        """过滤不需要的响应内容"""
        # 需要过滤的内容列表
        filters = [
            "您好！我是由中国的深度求索（DeepSeek）公司开发的智能助手DeepSeek-R1。有关模型和产品的详细内容请参考官方文档。",
            # 可以添加其他需要过滤的内容
        ]
        
        # 过滤固定内容
        filtered_content = content
        for filter_text in filters:
            filtered_content = filtered_content.replace(filter_text, "").strip()
        
        # 处理思索内容
        if not self.show_thinking:
            # 移除所有<think></think>标签及其内容
            import re
            filtered_content = re.sub(r'<think>.*?</think>', '', filtered_content, flags=re.DOTALL)
        else:
            # 保留内容但格式化显示
            filtered_content = filtered_content.replace('<think>', '\n💭 思考过程：\n').replace('</think>', '\n')
        
        return filtered_content.strip()

    def chat(self, messages: List[Dict], **kwargs) -> Optional[Dict]:
        """发送聊天请求"""
        try:
            data = {
                "model": kwargs.get("model", "llama2"),
                "messages": messages,
                "stream": False,
                "temperature": kwargs.get("temperature", 0.7),
                "max_tokens": kwargs.get("max_length", 2000)
            }
            
            self.logger.info(f"发送聊天请求: {data}")
            response = self._make_request("POST", "chat", json=data)
            
            # 记录完整响应用于调试
            self.logger.debug(f"收到API响应: {response}")
            
            # Ollama API 可能的响应格式:
            # 1. {"response": "content", ...}
            # 2. {"message": {"content": "text"}, ...}
            # 3. {"content": "text", ...}
            if response:
                if "response" in response:
                    content = self._filter_response(response["response"])
                elif "message" in response and isinstance(response["message"], dict):
                    content = self._filter_response(response["message"].get("content", ""))
                elif "content" in response:
                    content = self._filter_response(response["content"])
                else:
                    self.logger.error(f"未知的API响应格式: {response}")
                    raise ValueError("API响应格式错误")
                
                # 只有在有内容时才返回
                if content:
                    return {
                        "role": "assistant",
                        "content": content
                    }
                else:
                    self.logger.warning("响应内容被完全过滤")
                    return None
            else:
                self.logger.error("API返回空响应")
                raise ValueError("API返回空响应")
            
        except Exception as e:
            self.logger.error(f"聊天请求失败: {str(e)}")
            raise
    
    def chat_stream(self, messages: List[Dict], **kwargs):
        """流式聊天请求"""
        try:
            # 确保消息列表包含系统提示
            if not any(m["role"] == "system" for m in messages):
                messages.insert(0, {
                    "role": "system",
                    "content": "你是一个有帮助的AI助手。请用简洁、专业的方式回答问题。"
                })
            
            data = {
                "model": kwargs.get("model", "llama2"),
                "messages": messages,
                "stream": True,
                "temperature": kwargs.get("temperature", 0.7),
                "max_tokens": kwargs.get("max_length", 2000),
                "context_window": 4096  # 增加上下文窗口大小
            }
            
            self.logger.info(f"发送流式聊天请求: {data}")
            response = self._make_request("POST", "chat", json=data, stream=True)
            
            # 用于累积部分响应
            accumulated_response = ""
            in_think_block = False  # 标记是否在思索块内
            
            # 处理流式响应
            for line in response.iter_lines():
                if line:
                    try:
                        chunk = json.loads(line)
                        # 记录调试信息
                        self.logger.debug(f"收到流式响应块: {chunk}")
                        
                        # 获取当前块的内容
                        if "response" in chunk:
                            content = chunk["response"]
                        elif "message" in chunk and isinstance(chunk["message"], dict):
                            content = chunk["message"].get("content", "")
                        elif "content" in chunk:
                            content = chunk["content"]
                        else:
                            self.logger.warning(f"未知的流式响应格式: {chunk}")
                            continue
                        
                        # 检查思索标签
                        if "<think>" in content:
                            in_think_block = True
                            accumulated_response += content
                            continue
                        
                        if "</think>" in content:
                            in_think_block = False
                            accumulated_response += content
                            # 处理完整的思索块
                            filtered_content = self._filter_response(accumulated_response)
                            accumulated_response = ""
                            if filtered_content:
                                yield {"response": filtered_content}
                            continue
                        
                        if in_think_block:
                            # 在思索块内，继续累积
                            accumulated_response += content
                        else:
                            # 不在思索块内，直接处理当前内容
                            if accumulated_response:
                                # 先处理累积的内容
                                filtered_content = self._filter_response(accumulated_response)
                                accumulated_response = ""
                                if filtered_content:
                                    yield {"response": filtered_content}
                            
                            # 处理当前内容
                            filtered_content = self._filter_response(content)
                            if filtered_content:
                                yield {"response": filtered_content}
                        
                    except json.JSONDecodeError as e:
                        self.logger.error(f"解析流式响应失败: {str(e)}")
                        continue
            
            # 处理剩余的累积响应
            if accumulated_response:
                filtered_content = self._filter_response(accumulated_response)
                if filtered_content:
                    yield {"response": filtered_content}
                    
        except Exception as e:
            self.logger.error(f"流式聊天请求失败: {str(e)}")
            raise

    def generate(self, prompt: str, model: str = "llama2", **params) -> Dict:
        """生成文本"""
        try:
            response = self._make_request(
                "POST",
                "generate",
                json={
                    "model": model,
                    "prompt": prompt,
                    **params
                }
            )
            return response.json()
        except requests.exceptions.RequestException as e:
            self.logger.error(f"Generate request failed: {str(e)}")
            raise

    def chat_with_file(self, file_content: str, question: str = None, model: str = "llama2", **params):
        """发送带文件内容的聊天请求"""
        try:
            # 构建系统提示，告诉模型如何处理文件内容
            system_message = {
                "role": "system",
                "content": "你是一个助手，可以帮助分析和回答关于文件内容的问题。请仔细阅读提供的文件内容，并回答用户的问题。"
            }
            
            # 构建文件内容消息
            file_message = {
                "role": "user",
                "content": f"这是需要分析的文件内容:\n\n{file_content}"
            }
            
            # 构建消息列表
            messages = [system_message, file_message]
            
            # 如果有具体问题，添加到消息列表
            if question:
                messages.append({
                    "role": "user",
                    "content": question
                })
            
            # 发送请求
            response = self._make_request(
                "POST",
                "chat",
                json={
                    "model": model,
                    "messages": messages,
                    "stream": False,
                    **params
                }
            )
            
            return response.json()
        
        except Exception as e:
            self.logger.error(f"Chat with file failed: {str(e)}")
            raise

    def chat_with_file_stream(self, file_content: str, question: str = None, model: str = "llama2", **params):
        """流式发送带文件内容的聊天请求"""
        try:
            # 构建系统提示
            system_message = {
                "role": "system",
                "content": "你是一个助手，可以帮助分析和回答关于文件内容的问题。请仔细阅读提供的文件内容，并回答用户的问题。"
            }
            
            # 构建文件内容消息
            file_message = {
                "role": "user",
                "content": f"这是需要分析的文件内容:\n\n{file_content}"
            }
            
            # 构建消息列表
            messages = [system_message, file_message]
            
            # 如果有具体问题，添加到消息列表
            if question:
                messages.append({
                    "role": "user",
                    "content": question
                })
            
            # 发送流式请求
            response = self._make_request(
                "POST",
                "chat",
                json={
                    "model": model,
                    "messages": messages,
                    "stream": True,
                    **params
                },
                stream=True
            )
            
            for line in response.iter_lines():
                if line:
                    yield json.loads(line)
                
        except Exception as e:
            self.logger.error(f"Stream chat with file failed: {str(e)}")
            raise 

    def set_age_group(self, age_group: AgeGroup):
        """设置当前年龄组"""
        self.current_age_group = age_group 

    def ensure_model_exists(self, model_name: str = "llama2") -> bool:
        """确保模型存在，如果不存在则下载"""
        try:
            # 检查模型是否存在
            models = self.list_models()
            if any(m.get("name") == model_name for m in models):
                self.logger.info(f"模型 {model_name} 已存在")
                return True
            
            # 模型不存在，开始下载
            self.logger.info(f"开始下载模型 {model_name}")
            success = self.pull_model(model_name)
            
            if success:
                self.logger.info(f"模型 {model_name} 下载完成")
                return True
            else:
                self.logger.error(f"模型 {model_name} 下载失败")
                return False
            
        except Exception as e:
            self.logger.error(f"检查/下载模型失败: {str(e)}")
            return False 

    def list_available_models(self) -> List[str]:
        """获取可用的模型列表"""
        try:
            response = self._make_request("GET", "tags")
            models = response.json().get("models", [])
            return [m.get("name") for m in models if m.get("name")]
        except Exception as e:
            self.logger.error(f"获取模型列表失败: {str(e)}")
            return [] 