from abc import ABC, abstractmethod
import openai
import requests
import json
import logging

logger = logging.getLogger(__name__)


class LLM(ABC):
    """Abstract base class for large language model backends."""

    @abstractmethod
    def response(self, dialogue):
        """Stream a reply for the given chat history (abstract).

        Args:
            dialogue: list of message dicts forming the conversation so far.

        Returns:
            A generator yielding response chunks.
        """
        ...


class OpenAILLM(LLM):
    """LLM backed by an OpenAI-compatible chat-completions API."""

    def __init__(self, config):
        """Initialize the client.

        Args:
            config: configuration dict with keys ``model_name``, ``api_key``
                and ``url`` (the API base URL).
        """
        self.model_name = config.get("model_name")
        self.api_key = config.get("api_key")
        self.base_url = config.get("url")
        self.client = openai.OpenAI(api_key=self.api_key, base_url=self.base_url)

    def _stream(self, **kwargs):
        """Create a streaming chat completion and yield usable chunks.

        OpenAI-style APIs may emit keep-alive/usage chunks whose ``choices``
        list is empty; indexing those would raise IndexError, so they are
        filtered out here before the callers index ``choices[0]``.

        Args:
            **kwargs: extra arguments for ``chat.completions.create``
                (``messages``, optionally ``tools``).

        Yields:
            Stream chunks that contain at least one choice.
        """
        responses = self.client.chat.completions.create(
            model=self.model_name,
            stream=True,
            **kwargs
        )
        for chunk in responses:
            if chunk.choices:  # skip chunks with no choices (e.g. usage-only)
                yield chunk

    def response(self, dialogue):
        """Generate a streaming chat response.

        Args:
            dialogue: list of message dicts (chat history).

        Yields:
            str | None: content of each streamed delta (may be None for
            deltas that carry no text; callers are expected to filter).
        """
        try:
            for chunk in self._stream(messages=dialogue):
                yield chunk.choices[0].delta.content
        except Exception as e:
            # Best-effort streaming: log and end the generator quietly.
            logger.error(f"响应生成错误: {e}")

    def response_call(self, dialogue, functions_call):
        """Generate a streaming chat response with tool/function calling.

        Args:
            dialogue: list of message dicts (chat history).
            functions_call: tool definitions passed as ``tools``.

        Yields:
            tuple: (content delta, tool_calls delta) — either element may
            be None depending on what the chunk carries.
        """
        try:
            for chunk in self._stream(messages=dialogue, tools=functions_call):
                yield chunk.choices[0].delta.content, chunk.choices[0].delta.tool_calls
        except Exception as e:
            # Best-effort streaming: log and end the generator quietly.
            logger.error(f"响应生成错误: {e}")


class OllamaLLM(LLM):
    """LLM backed by a local Ollama server (streaming NDJSON over HTTP)."""

    # Default timeout (seconds) for the streaming HTTP request. Without a
    # timeout, a dead or unreachable server would hang the generator forever.
    DEFAULT_TIMEOUT = 60

    def __init__(self, config):
        """Initialize the Ollama client.

        Args:
            config: configuration dict with optional keys ``model_name``,
                ``url`` and ``timeout`` (seconds).
        """
        self.model_name = config.get("model_name", "qwen2.5")
        # Ollama's unified chat endpoint for locally hosted models.
        self.base_url = config.get("url", "http://localhost:11434/api/chat")
        self.timeout = config.get("timeout", self.DEFAULT_TIMEOUT)

    def _stream_chat(self, payload):
        """POST ``payload`` and yield each streamed ``message`` dict.

        Each NDJSON line is parsed; iteration stops once the server flags
        the final bookkeeping chunk with ``done: true``.

        Args:
            payload: JSON body for the /api/chat request.

        Yields:
            dict: the ``message`` object of each stream line (may be empty).

        Raises:
            requests.HTTPError: if the server returns an error status.
        """
        resp = requests.post(self.base_url, json=payload, stream=True,
                             timeout=self.timeout)
        resp.raise_for_status()
        for line in resp.iter_lines():
            if not line:
                continue
            data = json.loads(line.decode())
            yield data.get("message", {})
            if data.get("done"):  # terminal chunk — the stream is over
                break

    def response(self, dialogue):
        """Generate a streaming chat response.

        Args:
            dialogue: list of message dicts (chat history).

        Yields:
            str: non-empty content of each streamed chunk.
        """
        payload = {
            "model": self.model_name,
            "messages": dialogue,
            "stream": True
        }
        try:
            for msg in self._stream_chat(payload):
                content = msg.get("content")
                if content:
                    yield content
        except Exception as e:
            logger.error(f"OllamaLLM流式响应错误: {e}")

    def response_call(self, dialogue, tools):
        """Generate a streaming chat response with tool calling.

        Args:
            dialogue: list of message dicts (chat history).
            tools: tool definitions, e.g.
                [{"type": "function", "function": {...}}, ...]

        Yields:
            tuple: (content chunk, tool_calls info) — either may be None.
        """
        payload = {
            "model": self.model_name,
            "messages": dialogue,
            "stream": True,
            "tools": tools
        }
        try:
            for msg in self._stream_chat(payload):
                yield msg.get("content"), msg.get("tool_calls")
        except Exception as e:
            logger.error(f"OllamaLLM工具调用错误: {e}")


def create_instance(class_name, *args, **kwargs):
    """Factory: instantiate a class defined in this module by name.

    Args:
        class_name: the class name as a string.
        *args: positional arguments forwarded to the constructor.
        **kwargs: keyword arguments forwarded to the constructor.

    Returns:
        object: the newly created instance.

    Raises:
        ValueError: if no such class exists in this module.
    """
    target = globals().get(class_name)
    # Guard clause: fail fast on unknown names before constructing anything.
    if not target:
        raise ValueError(f"类 {class_name} 不存在")
    return target(*args, **kwargs)


if __name__ == "__main__":
    import os

    # NOTE(security): never hard-code API keys in source control — the key
    # is read from the DEEPSEEK_API_KEY environment variable instead.
    config = {
        "model_name": "deepseek-chat",  # swap for another DeepSeek model if needed
        "api_key": os.environ.get("DEEPSEEK_API_KEY", ""),
        "url": "https://api.deepseek.com"
    }

    # Create an OpenAILLM instance via the factory.
    deepseek = create_instance("OpenAILLM", config)

    dialogue = [
        {"role": "user", "content": "hello"}
    ]

    # Print the streamed response chunks as they arrive.
    for chunk in deepseek.response(dialogue):
        if chunk:  # skip None deltas
            print(chunk, end="", flush=True)