import requests
import json
import os
from models.database import Session, KnowledgeContent
from services.knowledge_service import KnowledgeService

class OllamaService:
    """Singleton client for an Ollama server with knowledge-base-augmented chat.

    Holds host/model configuration (loaded from ``config/model_config.json``),
    a rolling chat history, and a ``KnowledgeService`` used to retrieve
    relevant knowledge snippets that are prepended to each prompt.
    """

    _instance = None

    # Single source of truth for generation parameters; used whenever the
    # config file is missing, unreadable, or lacks a "parameters" section.
    # (Previously this dict was duplicated three times in load_config.)
    _DEFAULT_PARAMETERS = {
        "temperature": 0.7,
        "max_tokens": 4094,
        "top_p": 0.9,
        "context_length": 4096,    # context window length
        "memory_size": 20,         # how many chat turns to remember
        "frequency_penalty": 0.5,  # repetition penalty
        "presence_penalty": 0.5,   # topic penalty
    }

    def __new__(cls, *args, **kwargs):
        # Classic singleton: every instantiation returns the same object.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self, host="http://localhost:11434"):
        # Guard so repeated instantiation does not reset singleton state.
        # Note: a different `host` passed on a second call is ignored.
        if not hasattr(self, 'initialized'):
            self.host = host
            self.default_model = None
            self.chat_history = []
            self.knowledge_service = KnowledgeService()
            self.load_config()
            self.initialized = True

    def load_config(self):
        """Load host/model/parameters from config/model_config.json.

        Falls back to ``_DEFAULT_PARAMETERS`` when the file is absent, cannot
        be parsed, or has no "parameters" section. Never raises.
        """
        config_path = os.path.join("config", "model_config.json")
        try:
            if os.path.exists(config_path):
                with open(config_path, "r", encoding='utf-8') as f:
                    config = json.load(f)
                self.host = config.get("host", self.host)
                self.default_model = config.get("model", "")
                # dict() copy so later in-place edits of self.parameters can
                # never mutate the shared class-level default.
                self.parameters = config.get(
                    "parameters", dict(self._DEFAULT_PARAMETERS))
                print(f"加载配置成功: host={self.host}, model={self.default_model}")
            else:
                print("配置文件不存在，使用默认配置")
                self.parameters = dict(self._DEFAULT_PARAMETERS)
        except Exception as e:
            print(f"加载配置失败: {str(e)}")
            # Fall back to defaults on any read/parse error.
            self.parameters = dict(self._DEFAULT_PARAMETERS)

    def list_models(self):
        """Return the model names advertised by the server, or [] on any error."""
        try:
            # timeout added: a GET with no timeout can hang the caller forever
            # (the chat POSTs already use timeout=60).
            response = requests.get(f"{self.host}/api/tags", timeout=10)
            if response.status_code == 200:
                models = [model['name'] for model in response.json()['models']]
                print(f"获取到的模型列表: {models}")
                return models
            print(f"获取模型列表失败，状态码: {response.status_code}")
            return []
        except Exception as e:
            print(f"获取模型列表失败: {str(e)}")
            return []

    def load_knowledge_base(self):
        """Load all knowledge-base content from the database into memory,
        keyed by content id."""
        self.knowledge_base = {}
        with Session() as session:
            contents = session.query(KnowledgeContent).all()
            for content in contents:
                self.knowledge_base[content.id] = content.content

    def train_knowledge(self, texts, file_id=None):
        """Feed each text in *texts* to the knowledge service for training.

        Returns True on success; raises Exception (with the original chained)
        on the first failure.
        """
        try:
            for text in texts:
                self.knowledge_service.train_knowledge(text, file_id)
            return True
        except Exception as e:
            raise Exception(f"知识库训练失败: {str(e)}") from e

    def _build_prompt(self, prompt):
        """Compose system prompt + retrieved knowledge + history + user turn."""
        relevant_texts = self.knowledge_service.search(prompt)
        system_prompt = "你是一个专业的智能助手，基于知识库内容准确回答。"
        if relevant_texts:
            system_prompt += "\n相关知识：\n"
            # Prefer results flagged complete; otherwise use everything found.
            complete_texts = [r for r in relevant_texts if r.get('is_complete', False)]
            for result in (complete_texts or relevant_texts):
                system_prompt += f"\n{result['text']}"
        else:
            system_prompt += "\n\n注意：知识库中没有找到相关内容。"
        return f"{system_prompt}\n\n历史对话:\n{self.get_chat_history()}\n\n用户: {prompt}\n助手:"

    def _request_payload(self, model, full_prompt, stream):
        """Assemble the JSON body for /api/generate from configured parameters.

        NOTE(review): recent Ollama versions expect sampling settings nested
        under an "options" key; these top-level fields may be ignored by the
        server — confirm against the deployed Ollama version.
        """
        return {
            "model": model,
            "prompt": full_prompt,
            "stream": stream,
            "temperature": self.parameters.get("temperature", 0.7),
            "max_tokens": self.parameters.get("max_tokens", 2048),
            "top_p": self.parameters.get("top_p", 0.9),
            "frequency_penalty": self.parameters.get("frequency_penalty", 0.5),
            "presence_penalty": self.parameters.get("presence_penalty", 0.5),
            "context_length": self.parameters.get("context_length", 4096),
        }

    def _chat_stream(self, model, prompt, full_prompt, stream_callback):
        """Streaming request: forward each chunk to *stream_callback*, record
        the full answer in history, and return it. Errors are reported both
        through the callback and the return value (never raised)."""
        try:
            response = requests.post(
                f"{self.host}/api/generate",
                json=self._request_payload(model, full_prompt, True),
                stream=True,
                timeout=60
            )

            if response.status_code != 200:
                error_msg = f"请求失败 (状态码: {response.status_code})"
                stream_callback(error_msg)
                return error_msg

            full_response = ""
            for line in response.iter_lines():
                if not line:
                    continue
                try:
                    json_response = json.loads(line)
                except json.JSONDecodeError:
                    # Skip keep-alive / partial lines.
                    continue
                if 'response' in json_response:
                    chunk = json_response['response']
                    full_response += chunk
                    stream_callback(chunk)

            self.chat_history.append({
                "user": prompt,
                "assistant": full_response
            })
            return full_response

        except Exception as e:
            error_msg = f"发生错误: {str(e)}"
            stream_callback(error_msg)
            return error_msg

    def _chat_sync(self, model, prompt, full_prompt):
        """Non-streaming request: return the full answer, or an error string.

        Network exceptions propagate to chat()'s handlers.
        """
        response = requests.post(
            f"{self.host}/api/generate",
            json=self._request_payload(model, full_prompt, False),
            timeout=60  # generation can be slow; generous timeout
        )

        if response.status_code == 200:
            assistant_response = response.json()['response']
            # Record the turn so later prompts carry the conversation.
            self.chat_history.append({
                "user": prompt,
                "assistant": assistant_response
            })
            return assistant_response
        elif response.status_code == 404:
            return f"模型 {model} 未找到，请在模型配置中选择正确的模型"
        else:
            return f"请求失败 (状态码: {response.status_code})，请检查模型配置和服务器状态"

    def chat(self, model=None, prompt="", stream_callback=None):
        """Answer *prompt* using *model* (or the configured default).

        When *stream_callback* is given the response is streamed chunk by
        chunk through it. Returns the assistant's full answer, or a
        human-readable error string — this method never raises.
        """
        model = model or self.default_model
        if not model:
            return "未选择模型，请在模型配置中选择模型"

        try:
            full_prompt = self._build_prompt(prompt)
            if stream_callback:
                # Streaming path reports its own errors via the callback.
                return self._chat_stream(model, prompt, full_prompt, stream_callback)
            return self._chat_sync(model, prompt, full_prompt)
        except requests.exceptions.ConnectionError:
            return "连接失败，请检查服务器地址是否正确"
        except requests.exceptions.Timeout:
            return "请求超时，请检查服务器状态"
        except Exception as e:
            return f"发生错误: {str(e)}"

    def get_chat_history(self):
        """Return the last ``memory_size`` turns formatted as 用户/助手 lines."""
        if not self.chat_history:
            return ""
        # Fallback unified with _DEFAULT_PARAMETERS (was an inconsistent 10).
        memory_size = self.parameters.get(
            "memory_size", self._DEFAULT_PARAMETERS["memory_size"])
        return "\n".join(
            f"用户: {h['user']}\n助手: {h['assistant']}"
            for h in self.chat_history[-memory_size:]
        )