import json
import requests
from typing import Generator, List, Dict

def chat_completion(
    messages: List[Dict[str, str]],
    model: str = "deepseek-r1:1.5b",
    stream: bool = True,
    base_url: str = "http://192.168.2.5:11434",
    **kwargs
) -> Generator[Dict, None, None]:
    """
    Call the Ollama chat API and yield the raw parsed response chunks.

    Args:
        messages: Conversation history as [{"role": ..., "content": ...}] dicts.
        model: Ollama model name.
        stream: Whether to request a streamed (line-delimited JSON) response.
        base_url: Base URL of the Ollama server.
        **kwargs: Extra generation options forwarded in the "options" field
            (e.g. temperature, num_predict).

    Yields:
        Dict: One parsed JSON object per non-empty response line.

    Raises:
        requests.HTTPError: On a non-2xx HTTP status.
        requests.RequestException: On connection or timeout failures.
    """
    url = f"{base_url}/api/chat"
    payload = {
        "model": model,
        "messages": messages,
        "stream": stream,
        "options": kwargs
    }

    # 60s timeout: 10s is often too short for a model to load and emit its
    # first token (sibling helpers in this file use 30-60s as well).
    # The `with` block guarantees the streamed connection is released.
    with requests.post(url, json=payload, stream=stream, timeout=60) as response:
        response.raise_for_status()

        for line in response.iter_lines():
            if line:
                # Each line of the stream is a standalone JSON object (NDJSON).
                yield json.loads(line.decode('utf-8'))

# Ollama chat API: AI generation with streamed responses and multi-turn conversation support
def chat_completion2(
    messages: List[Dict[str, str]],
    model: str = "deepseek-r1:1.5b",
    stream: bool = True,
    base_url: str = "http://192.168.2.5:11434",
    **kwargs
) -> Generator[str, None, None]:
    """
    Enhanced Ollama chat call that yields message content strings.

    Features:
    - Improved error handling (timeout / connection / generic request errors
      are reported as "[ERROR] ..." sentinel strings instead of raising).
    - Configurable timeout via kwargs (``timeout``, default 60s).
    - Automatic retry on timeout via kwargs (``max_retries``, default 1).

    Args:
        messages: Conversation history as [{"role": ..., "content": ...}] dicts.
        model: Ollama model name.
        stream: Whether to request a streamed response.
        base_url: Base URL of the Ollama server.
        **kwargs: ``timeout`` / ``max_retries`` transport settings; everything
            else is forwarded to Ollama in the "options" field.

    Yields:
        str: Message content fragments, then "[DONE]"; "[RETRY] ..." /
        "[ERROR] ..." sentinels on failures.
    """
    # Pull transport settings out of kwargs BEFORE building the payload, so
    # they are not forwarded to Ollama as (invalid) model options.
    timeout = kwargs.pop("timeout", 60)
    max_retries = kwargs.pop("max_retries", 1)

    url = f"{base_url}/api/chat"
    payload = {
        "model": model,
        "messages": messages,
        "stream": stream,
        "options": kwargs
    }

    retry_count = 0

    while retry_count <= max_retries:
        try:
            # Log request metadata only (no message content / sensitive data).
            print(f"请求模型: {model}, 流式响应: {stream}, 重试次数: {retry_count}")

            # `with` ensures the streamed connection is closed on every path.
            with requests.post(
                url,
                json=payload,
                stream=stream,
                timeout=timeout
            ) as response:
                response.raise_for_status()

                # Reset the retry counter once a connection succeeds.
                retry_count = 0

                for line in response.iter_lines():
                    if not line:
                        continue
                    try:
                        decoded_line = line.decode('utf-8')
                        chunk = json.loads(decoded_line)
                        if 'message' in chunk and 'content' in chunk['message']:
                            yield chunk['message']['content']
                        elif 'done' in chunk and chunk['done']:
                            yield "[DONE]"
                            return
                    except json.JSONDecodeError:
                        # Skip malformed lines rather than aborting the stream.
                        print(f"无法解析响应: {decoded_line}")
                        continue

                # Ensure "[DONE]" is emitted even if the stream ended without
                # an explicit done-chunk.
                yield "[DONE]"
                return

        except requests.exceptions.Timeout:
            retry_count += 1
            if retry_count <= max_retries:
                print(f"请求超时，正在重试 ({retry_count}/{max_retries})...")
                yield f"[RETRY] 请求超时，正在重试 ({retry_count}/{max_retries})..."
            else:
                print("请求超时，已达到最大重试次数")
                yield "[ERROR] 请求超时，无法连接到AI服务"
                yield "[DONE]"
                return

        except requests.exceptions.ConnectionError:
            print("连接错误，无法连接到AI服务")
            yield "[ERROR] 连接错误，无法连接到AI服务"
            yield "[DONE]"
            return

        except requests.exceptions.RequestException as e:
            print(f"API请求失败: {str(e)}")
            yield f"[ERROR] API请求失败: {str(e)}"
            yield "[DONE]"
            return

def simple_generate(
    query: str,
    model: str = "deepseek-r1:1.5b",
    base_url: str = "http://192.168.2.5:11434",
    **kwargs
) -> str:
    """
    Generate text via the Ollama API (non-streaming).

    Args:
        query: The input prompt.
        model: Ollama model name.
        base_url: Base URL of the Ollama server.
        **kwargs: Extra generation options (e.g. temperature, max_tokens)
            forwarded in the "options" field.

    Returns:
        str: The generated text, or "" if the response has no "response" field.

    Raises:
        ConnectionError: If the HTTP request to the Ollama server fails.
        RuntimeError: If the server returns a body that is not valid JSON.
    """
    url = f"{base_url}/api/generate"

    # Build the request body; explicitly disable streaming so the server
    # returns one complete JSON object.
    payload = {
        "model": model,
        "prompt": query,
        "stream": False,
        "options": kwargs
    }

    try:
        response = requests.post(
            url,
            json=payload,
            timeout=30  # Overall request timeout
        )
        response.raise_for_status()  # Raise on non-2xx HTTP status
        result = response.json()
    except requests.exceptions.RequestException as e:
        # Covers connection/timeout/HTTP errors (and, on modern `requests`,
        # JSONDecodeError, which subclasses RequestException). Chain the
        # cause so debugging keeps the original traceback.
        raise ConnectionError(f"请求 Ollama 服务失败: {str(e)}") from e
    except ValueError as e:
        # Fallback JSON-parse failure path for older `requests` versions
        # where json() raised a plain ValueError.
        raise RuntimeError("无效的 JSON 响应") from e

    # .get() never raises KeyError, so no KeyError handling is needed here
    # (the previous version's KeyError branch was unreachable).
    return result.get("response", "")
#=============================================