from typing import List, Dict, Any, Optional
import requests
import json
from .base import BaseLLM

class OllamaLLM(BaseLLM):
    def __init__(
        self,
        model: str = "llama3.1",
        base_url: str = "http://localhost:11434",
        temperature: float = 0.7,
        max_tokens: int = 2048,
        system_prompt: str = "你是一个专业的AI助手，请用准确、专业的方式回答问题。"
    ):
        """Initialize the Ollama LLM client.

        Args:
            model: Name of the Ollama model to use (e.g. "llama3.1").
            base_url: Base URL of the local Ollama HTTP service.
            temperature: Sampling temperature controlling randomness.
            max_tokens: Maximum number of tokens to generate per reply.
            system_prompt: System prompt sent with every generation request.
        """
        # Strip any trailing slash so later f-strings can append "/api/...".
        self.base_url = base_url.rstrip('/')
        self.model = model
        self.system_prompt = system_prompt
        self.temperature = temperature
        self.max_tokens = max_tokens

        # Fail fast: verify the Ollama service (and model) before first use.
        self._check_service()
    
    def _check_service(self):
        """Verify the Ollama service is reachable and warn if the model is missing.

        Queries ``GET /api/tags`` as a liveness probe, then scans the returned
        model list for ``self.model``. A missing model only prints a hint (with
        the ``ollama pull`` command) rather than failing, since the user may
        pull it later.

        Raises:
            ConnectionError: If the service cannot be reached, times out, or
                responds with a non-200 status.
        """
        try:
            # Timeout prevents __init__ from hanging forever when the port is
            # unreachable/filtered; a timeout surfaces as RequestException below.
            response = requests.get(f"{self.base_url}/api/tags", timeout=5)
            if response.status_code != 200:
                raise ConnectionError(f"Ollama服务响应异常: {response.status_code}")

            # Check whether the requested model has already been downloaded.
            print(f"正在检查模型 {self.model} 是否已下载...")
            models = response.json().get("models", [])
            # Substring match: Ollama tag names include a version suffix
            # (e.g. "llama3.1:latest"), so exact equality would miss them.
            if not any(self.model in model["name"] for model in models):
                print(f"提示：模型 {self.model} 未下载，请使用以下命令下载：")
                print(f"ollama pull {self.model}")

        except requests.exceptions.RequestException as e:
            # Chain the original cause for easier debugging.
            raise ConnectionError(f"无法连接到Ollama服务: {str(e)}") from e
    
    def generate(
        self,
        prompt: str,
        context: Optional[List[Dict[str, Any]]] = None,
        **kwargs
    ) -> str:
        """Generate an answer from the Ollama model, streaming tokens to stdout.

        Args:
            prompt: The user's question.
            context: Optional retrieved documents; when present they are folded
                into the prompt via ``self._build_prompt``.
            **kwargs: Extra top-level fields merged into the API payload.

        Returns:
            The full generated text (stripped), or an error message string if
            the API call fails — callers should be aware errors are returned,
            not raised.
        """
        # Fold retrieved context into the prompt when provided.
        if context:
            full_prompt = self._build_prompt(prompt, context)
        else:
            full_prompt = prompt

        try:
            # Bug fix: Ollama ignores top-level "temperature"/"max_tokens".
            # Sampling parameters belong in the "options" object, and the
            # generation cap is named "num_predict" in the Ollama API.
            # Bug fix: pass stream=True to requests as well — otherwise the
            # whole body is buffered and the "real-time" printing below is fake.
            response = requests.post(
                f"{self.base_url}/api/generate",
                json={
                    "model": self.model,
                    "prompt": full_prompt,
                    "system": self.system_prompt,
                    "options": {
                        "temperature": self.temperature,
                        "num_predict": self.max_tokens,
                    },
                    "stream": True,
                    **kwargs
                },
                stream=True,
                timeout=120,  # avoid hanging forever on a stalled server
            )

            if response.status_code != 200:
                raise Exception(f"API调用失败: {response.status_code}")

            # Each streamed line is an independent JSON object; accumulate the
            # "response" fragments while echoing them as they arrive.
            response_text = ""
            for raw_line in response.iter_lines():
                if not raw_line:
                    continue
                try:
                    data = json.loads(raw_line)
                except json.JSONDecodeError:
                    # Skip malformed/partial lines rather than aborting.
                    continue
                if "response" in data:
                    response_text += data["response"]
                    # Echo tokens immediately for real-time feedback.
                    print(data["response"], end="", flush=True)

            print()  # final newline after the streamed output
            return response_text.strip()

        except Exception as e:
            # Deliberate best-effort contract: report the failure as the
            # return value instead of propagating the exception.
            error_msg = f"Ollama API调用出错: {str(e)}"
            print(error_msg)
            return error_msg
