from abc import ABC, abstractmethod
from typing import Dict, Any, List
from openai import OpenAI

class LLMBase(ABC):
    """Abstract interface that every large-language-model backend implements."""

    @abstractmethod
    def generate_text(self, prompt: str, **kwargs) -> str:
        """Produce a completion for a single free-form text prompt."""
        ...

    @abstractmethod
    def chat_completion(self, messages: List[Dict], **kwargs) -> str:
        """Produce an assistant reply for a multi-turn chat message list."""
        ...

class OpenAILLM(LLMBase):
    """OpenAI LLM implementation backed by the official ``openai`` client."""

    def __init__(self, api_key: str, model: str = "gpt-3.5-turbo"):
        """
        Args:
            api_key: OpenAI API key.
            model: Chat model name; defaults to "gpt-3.5-turbo".
        """
        self.client = OpenAI(api_key=api_key)
        self.model = model

    def generate_text(self, prompt: str, **kwargs) -> str:
        """Generate text for a single prompt.

        Routed through the chat-completions endpoint: the legacy
        ``client.completions`` endpoint rejects chat models such as the
        default "gpt-3.5-turbo", so calling it with this class's default
        configuration fails at the API level. Wrapping the prompt as a
        single user message fixes that and matches how
        GiteeAILLM.generate_text already behaves.
        """
        messages = [{"role": "user", "content": prompt}]
        return self.chat_completion(messages, **kwargs)

    def chat_completion(self, messages: List[Dict], **kwargs) -> str:
        """Send a chat message list and return the assistant reply text.

        Extra keyword arguments (temperature, max_tokens, ...) are passed
        straight through to the API call.
        """
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            **kwargs
        )
        return response.choices[0].message.content

class GiteeAILLM(LLMBase):
    """Gitee AI LLM implementation (OpenAI-compatible endpoint)."""

    def __init__(self, api_key: str, model: str = "internlm3-8b-instruct", base_url: str = "https://ai.gitee.com/v1"):
        # Gitee AI speaks the OpenAI wire protocol, so the standard client
        # is simply pointed at its base URL. The failover header asks the
        # service to retry on alternate backends.
        self.client = OpenAI(
            base_url=base_url,
            api_key=api_key,
            default_headers={"X-Failover-Enabled": "true"}
        )
        self.model = model

    def generate_text(self, prompt: str, **kwargs) -> str:
        """Generate text for a single prompt by wrapping it as a chat turn."""
        wrapped = [
            {
                "role": "system",
                "content": "You are a helpful and harmless assistant. You should think step-by-step."
            },
            {
                "role": "user",
                "content": prompt
            },
        ]
        return self.chat_completion(wrapped, **kwargs)

    def chat_completion(self, messages: List[Dict], **kwargs) -> str:
        """Send a chat message list; handles streaming and non-streaming modes.

        Caller-supplied keyword arguments override the defaults below.
        """
        params: Dict[str, Any] = {
            "model": self.model,
            "stream": False,
            "max_tokens": 1024,
            "temperature": 1,
            "top_p": 0.8,
            "frequency_penalty": 1,
            "extra_body": {
                "top_k": 40,
            },
        }
        params.update(kwargs)          # user options win over defaults
        params["messages"] = messages

        response = self.client.chat.completions.create(**params)

        if not params["stream"]:
            # Non-streaming: the reply arrives as a single message.
            return response.choices[0].message.content

        # Streaming: concatenate every delta — including any model
        # "reasoning" chunks, when present — into one string.
        pieces = []
        for chunk in response:
            if not chunk.choices:
                continue
            delta = chunk.choices[0].delta
            if getattr(delta, "reasoning_content", None):
                pieces.append(delta.reasoning_content)
            elif delta.content:
                pieces.append(delta.content)
        return "".join(pieces)

class LocalLLM(LLMBase):
    """Locally hosted LLM implementation (loading/inference not implemented yet)."""

    def __init__(self, model_path: str):
        # Only the path is recorded for now; actual model loading is TODO.
        self.model_path = model_path

    def generate_text(self, prompt: str, **kwargs) -> str:
        """Stub for local text generation; currently returns None."""
        # TODO: run the local model on `prompt`.

    def chat_completion(self, messages: List[Dict], **kwargs) -> str:
        """Stub for local chat completion; currently returns None."""
        # TODO: run the local model on `messages`.

class LLMFactory:
    """Factory that builds LLM instances from an engine-name string."""

    # Registry: engine identifier -> implementation class.
    _llm_types = {
        "openai": OpenAILLM,
        "gitee_ai": GiteeAILLM,
        "local": LocalLLM
    }

    @classmethod
    def create_llm(cls, llm_type: str, **config) -> LLMBase:
        """Instantiate the implementation registered under ``llm_type``.

        Raises:
            ValueError: if ``llm_type`` has not been registered.
        """
        llm_class = cls._llm_types.get(llm_type)
        if llm_class is None:
            raise ValueError(f"Unsupported LLM type: {llm_type}")
        return llm_class(**config)

    @classmethod
    def register_llm(cls, name: str, llm_class: type):
        """Register (or replace) an LLM implementation under ``name``."""
        cls._llm_types[name] = llm_class