import requests
import json
import logging

# Configure root-logger output format and level for the whole script.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Ollama service address and default model.
# NOTE(review): these are hard-coded here, not read from a config file
# as the original comment claimed — externalize if deployment varies.
OLLAMA_URL = "http://localhost:11434"
DEFAULT_MODEL = "deepseek-r1:8b"



def generate_text(model: str, prompt: str, stream: bool = True):
    """
    Call Ollama's text-generation endpoint (/api/generate).

    :param model: model name (e.g. "llama2")
    :param prompt: input prompt text
    :param stream: whether to use streaming transfer
    :return: the generated text, or None on request/parse failure
    """
    url = f"{OLLAMA_URL}/api/generate"
    payload = {
        "model": model,
        "prompt": prompt,
        "stream": stream
    }
    try:
        # Bug fix: forward `stream` to requests (as chat() already does) so a
        # streamed response is consumed incrementally via iter_lines() instead
        # of being fully buffered into memory before iteration starts.
        response = requests.post(url, json=payload, stream=stream)
        response.raise_for_status()
        if stream:
            # Ollama streams newline-delimited JSON objects; concatenate the
            # "response" field of each chunk into the final text.
            full_response = ""
            for line in response.iter_lines():
                if line:
                    try:
                        line_str = line.decode('utf-8')
                        # Tolerate an SSE-style "data:" prefix on a chunk.
                        if line_str.startswith('data:'):
                            line_str = line_str[5:]
                        data = json.loads(line_str)
                        if 'response' in data:
                            full_response += data['response']
                    except json.JSONDecodeError as e:
                        # Skip malformed chunks but keep consuming the stream.
                        logging.error(f"JSON 解析错误: {e}")
            return full_response
        else:
            return response.json().get("response")
    except requests.RequestException as e:
        logging.error(f"请求出错: {e}")
        return None
    except json.JSONDecodeError as e:
        logging.error(f"JSON 解析错误: {e}")
        return None

def chat(model: str, messages: list, stream: bool = False):
    """
    Call Ollama's chat endpoint (/api/chat).

    :param model: model name (e.g. "llama2")
    :param messages: conversation history, e.g. [{"role": "user", "content": "..."}, ...]
    :param stream: whether to use streaming transfer
    :return: the model's reply text, or None on request/parse failure
    """
    endpoint = f"{OLLAMA_URL}/api/chat"
    body = {"model": model, "messages": messages, "stream": stream}
    try:
        resp = requests.post(endpoint, json=body, stream=stream)
        resp.raise_for_status()
        if not stream:
            return resp.json().get("message", {}).get("content")
        # Streaming path: collect content chunks while echoing them live.
        chunks = []
        for raw in resp.iter_lines():
            if not raw:
                continue
            text = raw.decode('utf-8')
            # Tolerate an SSE-style "data:" prefix on a chunk.
            if text.startswith('data:'):
                text = text[5:]
            try:
                message = json.loads(text).get('message', {})
            except json.JSONDecodeError as e:
                logging.error(f"JSON 解析错误: {e}")
                continue
            if 'content' in message:
                chunk = message['content']
                chunks.append(chunk)
                # Echo each content chunk to stdout as it arrives.
                print(chunk, end='', flush=True)
        print()  # terminate the streamed output with a newline
        return ''.join(chunks)
    except requests.RequestException as e:
        logging.error(f"请求出错: {e}")
        return None
    except json.JSONDecodeError as e:
        logging.error(f"JSON 解析错误: {e}")
        return None

def list_models():
    """
    List the models downloaded on the local Ollama instance.

    :return: list of model descriptors, or an empty list on failure
    """
    try:
        resp = requests.get(f"{OLLAMA_URL}/api/tags")
        resp.raise_for_status()
    except requests.RequestException as e:
        logging.error(f"请求出错: {e}")
        return []
    try:
        return resp.json().get("models", [])
    except json.JSONDecodeError as e:
        logging.error(f"JSON 解析错误: {e}")
        return []

# Example usage: print the locally available models.
if __name__ == "__main__":
    logging.info("本地模型列表:")
    for entry in list_models():
        logging.info(f"- {entry['name']}")