import requests
import json
from typing import Dict, Any, List, Optional

class OllamaClient:
    """Minimal client for the Ollama REST API (text generation and chat)."""

    def __init__(self, base_url: str = "http://localhost:11434", timeout: float = 120.0):
        """
        Initialize the Ollama client.

        :param base_url: Ollama server address; defaults to the local port 11434.
        :param timeout: Per-request timeout in seconds. Without one,
                        ``requests.post`` can block forever on a stalled server.
        """
        self.base_url = base_url.rstrip('/')
        self.timeout = timeout
        self.headers = {
            "Content-Type": "application/json",
            "Accept": "application/json",
        }

    def _request(self, path: str, payload: Dict[str, Any]) -> Dict[str, Any]:
        """
        POST *payload* as JSON to ``base_url + path`` and return the decoded body.

        :raises requests.exceptions.RequestException: on connection errors,
            timeouts, or non-2xx status codes.
        """
        # json= lets requests serialize and set the charset itself,
        # instead of hand-rolling data=json.dumps(...).
        response = requests.post(
            f"{self.base_url}{path}",
            headers=self.headers,
            json=payload,
            timeout=self.timeout,
        )
        response.raise_for_status()
        return response.json()

    @staticmethod
    def _with_options(
        payload: Dict[str, Any], parameters: Optional[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """
        Attach model parameters under the ``"options"`` key.

        The Ollama API expects generation parameters (temperature, num_predict,
        ...) nested under ``"options"``; merging them into the top level of the
        payload (the previous behavior) leaves them ignored by the server and
        risks clobbering ``"model"``/``"stream"``.
        """
        if parameters:
            payload["options"] = dict(parameters)  # copy: don't alias caller's dict
        return payload

    def generate(
        self,
        model_name: str,
        prompt: str,
        parameters: Optional[Dict[str, Any]] = None
    ) -> str:
        """
        Text-generation endpoint (``/api/generate``).

        :param model_name: Model name (e.g. "llama2", "mistral").
        :param prompt: Input prompt.
        :param parameters: Optional model parameters (nested under "options").
        :return: The generated text, or an error-description string on failure.
        """
        payload = self._with_options(
            {"model": model_name, "prompt": prompt, "stream": False},
            parameters,
        )
        try:
            return self._request("/api/generate", payload)["response"]
        except requests.exceptions.RequestException as e:
            return f"Request error: {str(e)}"
        except KeyError:
            return "Invalid response format"

    def chat(
        self,
        model_name: str,
        messages: List[Dict[str, str]],
        parameters: Optional[Dict[str, Any]] = None
    ) -> str:
        """
        Chat endpoint (``/api/chat``).

        :param model_name: Model name.
        :param messages: Message history
            (format: [{"role": "user", "content": "..."}, ...]).
        :param parameters: Optional model parameters (nested under "options").
        :return: The assistant's reply, or an error-description string on failure.
        """
        payload = self._with_options(
            {"model": model_name, "messages": messages, "stream": False},
            parameters,
        )
        try:
            return self._request("/api/chat", payload)["message"]["content"]
        except requests.exceptions.RequestException as e:
            return f"Request error: {str(e)}"
        except KeyError:
            return "Invalid response format"

def main() -> None:
    """Demo: one generate call and one chat call against a local Ollama server."""
    ollama = OllamaClient()

    # Example 1: plain text generation
    text_response = ollama.generate(
        model_name="qwen2.5:3b",
        prompt="请用中文解释人工智能的基本概念",
        parameters={
            "temperature": 0.7,
            "max_tokens": 500
        }
    )
    print("文本生成结果：")
    print(text_response)
    print("\n" + "="*80 + "\n")

    # Example 2: multi-turn chat
    chat_history = [
        {"role": "user", "content": "你好，请介绍一下你自己"},
        {"role": "assistant", "content": "我是由Ollama驱动的AI助手，我可以帮助回答各种问题。"},
        {"role": "user", "content": "你能用中文进行对话吗？"}
    ]

    chat_response = ollama.chat(
        model_name="deepseek-r1:latest",
        messages=chat_history,
        parameters={
            "temperature": 0.9,
            "max_tokens": 300
        }
    )
    print("对话响应：")
    print(chat_response)


if __name__ == "__main__":
    main()