import requests
from typing import List, Dict, Any
from .llm_interface import LLMInterface

class OllamaLLM(LLMInterface):
    """Ollama model implementation of the LLM interface.

    Wraps the HTTP API of a local (or remote) Ollama server
    (``/api/chat``) behind the project's ``LLMInterface`` contract.
    """

    def __init__(self, config: Dict[str, Any]):
        """Initialize the client from a configuration dict.

        Args:
            config: Optional keys: ``base_url``, ``model_name``,
                ``timeout`` (seconds, default 120), and the sampling
                options ``temperature``, ``top_p``, ``top_k``,
                ``repeat_penalty``, ``num_ctx``, ``stream``.
        """
        self.base_url = config.get("base_url", "http://localhost:11434")
        self.model_name = config.get("model_name", "llama2")
        # Without a timeout, a hung Ollama server would block the caller forever.
        self.timeout = config.get("timeout", 120)
        self.default_params = {
            "temperature": config.get("temperature", 0.7),
            "top_p": config.get("top_p", 0.9),
            "top_k": config.get("top_k", 40),
            "repeat_penalty": config.get("repeat_penalty", 1.1),
            "num_ctx": config.get("num_ctx", 2048),
            "stream": config.get("stream", False)
        }

    def chat(self, messages: List[Dict[str, str]], **kwargs) -> str:
        """Send a chat request to the Ollama API and return the reply text.

        Args:
            messages: Chat history as ``[{"role": ..., "content": ...}, ...]``.
            **kwargs: Per-call overrides for the default sampling parameters.

        Returns:
            The assistant message content from the model's response.

        Raises:
            RuntimeError: If the API responds with a non-200 status code.
            requests.exceptions.RequestException: On connection failure
                or timeout.
        """
        url = f"{self.base_url}/api/chat"

        # Merge default parameters with per-call overrides (overrides win).
        params = {**self.default_params, **kwargs}

        print(f"Ollama API调用开始: {params}")

        # The Ollama API expects "stream" at the top level of the payload,
        # while sampling parameters must be nested under "options" —
        # sent at the top level they are silently ignored by the server.
        stream = params.pop("stream", False)
        # NOTE: stream=True would return NDJSON chunks which response.json()
        # cannot parse; this client only supports non-streaming responses.
        response = requests.post(
            url,
            json={
                "model": self.model_name,
                "messages": messages,
                "stream": stream,
                "options": params,
            },
            timeout=self.timeout,
        )

        if response.status_code != 200:
            # RuntimeError is a subclass of Exception, so existing callers
            # catching Exception still work.
            raise RuntimeError(f"Ollama API调用失败: {response.text}")

        return response.json()["message"]["content"]

    def get_model_name(self) -> str:
        """Return the configured Ollama model name."""
        return self.model_name