"""
OpenAI LLM Model Implementation

This module provides an implementation of the BaseAgentModel that supports
OpenAI and OpenAI-compatible APIs.
It follows an async-first approach, where the primary implementation is the
asynchronous streaming method (a_stream_run).
"""
import os
import sys
import httpx
import asyncio
from pathlib import Path
from typing import Dict, List, Optional, AsyncIterator, Callable, Any
PROJECT_ROOT = Path(__file__).parent.parent.resolve()
if str(PROJECT_ROOT) not in sys.path:
    sys.path.append(str(PROJECT_ROOT))
import openai
from openai import OpenAI, AsyncOpenAI
from openai.types.chat import ChatCompletionChunk
from models.base_agent_model import (
    BaseAgentModel,
    AsyncResponseStream,
    StreamingChunk,
    ModelResponse
)

# Import configuration from config.py
from config import cfg

# =============================================================================
# Model factory functions
# =============================================================================

def create_model(model_name: Optional[str] = None, **kwargs) -> 'LLMModel':
    """
    Create an LLMModel instance from the global configuration.

    Args:
        model_name: Model name; when None, falls back to the default model
            configured in config.py (cfg.llm["model_name"]).
        **kwargs: Extra configuration values that override the config file
            (e.g. api_key, base_url, max_retries, retry_delay, timeout).

    Returns:
        LLMModel: A configured LLM model instance.

    Examples:
        >>> llm = create_model()                      # default config
        >>> llm = create_model("custom-model")        # explicit model
        >>> llm = create_model(api_key="custom-key")  # override config
    """
    # Defaults come from config.py; explicit kwargs win on conflict.
    config_data = {
        "model_name": model_name or cfg.llm["model_name"],
        "api_key": cfg.llm.get("api_key"),
        "base_url": cfg.llm.get("base_url"),
        "max_retries": cfg.system.get("max_retries", 3),
        "retry_delay": cfg.system.get("retry_delay", 20.0),
        "timeout": cfg.system.get("timeout", 60.0),
    }
    config_data.update(kwargs)

    return LLMModel(LLMModelConfig(**config_data))


class LLMModelConfig:
    """Lightweight configuration container for LLMModel.

    Attributes:
        model_name: Name of the model to request from the API.
        api_key: API key; None lets the OpenAI client fall back to env vars.
        base_url: Custom endpoint for OpenAI-compatible servers, or None.
        max_retries: Number of retries after the first failed attempt.
        retry_delay: Seconds to wait between retries.
        timeout: Per-attempt timeout in seconds.
    """

    def __init__(self, model_name: str, api_key: Optional[str] = None,
                 base_url: Optional[str] = None, max_retries: int = 3,
                 retry_delay: float = 20.0, timeout: float = 60.0):
        self.model_name = model_name
        self.api_key = api_key
        self.base_url = base_url
        self.max_retries = max_retries
        self.retry_delay = retry_delay
        self.timeout = timeout

    def __repr__(self) -> str:
        # api_key is deliberately omitted so secrets never leak into logs.
        return (f"{type(self).__name__}(model_name={self.model_name!r}, "
                f"base_url={self.base_url!r}, max_retries={self.max_retries}, "
                f"retry_delay={self.retry_delay}, timeout={self.timeout})")


class OpenAIProvider:
    """Thin wrapper around the OpenAI SDK clients for one configured model."""

    def __init__(self, config: LLMModelConfig):
        """
        Initialize sync and async OpenAI clients from the given config.

        Args:
            config: Connection/model settings (api_key, base_url, model_name).
        """
        self.config = config
        self.model_name = config.model_name
        self.api_key = config.api_key
        self.base_url = config.base_url
        # Kept for callers that need direct access to the SDK module/types.
        self._openai = openai
        self._ChatCompletionChunk = ChatCompletionChunk

        # Sync and async clients share the same credentials and endpoint.
        self.client = OpenAI(
            api_key=self.api_key,
            base_url=self.base_url
        )
        self.async_client = AsyncOpenAI(
            api_key=self.api_key,
            base_url=self.base_url
        )

    def preprocess_messages(self, messages: List[Dict[str, str]]) -> List[Dict[str, str]]:
        """Hook for message preprocessing; identity for plain OpenAI."""
        return messages

    async def create_stream(self, messages: List[Dict[str, str]], temperature: float,
                            max_tokens: Optional[int], **kwargs) -> Any:
        """
        Open a streaming chat-completions request.

        Args:
            messages: Chat messages with 'role' and 'content' keys.
            temperature: Sampling temperature.
            max_tokens: Token cap, or None to leave it to the server default.
            **kwargs: Extra request parameters; a boolean 'thinking' key is
                translated into the non-standard extra_body thinking flag.

        Returns:
            The SDK's async streaming response object.
        """
        processed_messages = self.preprocess_messages(messages)

        params = {
            "model": self.model_name,
            "messages": processed_messages,
            "temperature": temperature,
            "stream": True,
            **kwargs
        }
        if max_tokens is not None:
            params["max_tokens"] = max_tokens

        # 'thinking' is a convenience flag for thinking-capable backends.
        # FIX: merge into any caller-supplied extra_body instead of
        # overwriting it, as the original did.
        if 'thinking' in params:
            mode = "enabled" if params.pop('thinking') else "disabled"
            extra_body = dict(params.get('extra_body') or {})
            extra_body["thinking"] = {"type": mode}
            params['extra_body'] = extra_body

        return await self.async_client.chat.completions.create(**params)

    def process_chunk(self, chunk) -> StreamingChunk[str]:
        """
        Convert one SDK streaming chunk into a StreamingChunk.

        Reasoning deltas (reasoning_content, emitted by some compatible
        backends) are flagged with is_reasoning=True.
        """
        # FIX: the original indexed chunk.choices[0] before its emptiness
        # check, so a choices-less chunk raised IndexError; guard first.
        if not chunk.choices:
            return StreamingChunk(
                content="",
                is_finished=False,
                raw_chunk=chunk,
                is_reasoning=False
            )

        delta = chunk.choices[0].delta
        reasoning = getattr(delta, 'reasoning_content', None)
        if reasoning:
            content, is_reasoning = reasoning, True
        else:
            content, is_reasoning = (delta.content or ""), False

        return StreamingChunk(
            content=content,
            is_finished=chunk.choices[0].finish_reason is not None,
            raw_chunk=chunk,
            is_reasoning=is_reasoning
        )


class LLMModel(BaseAgentModel):
    """
    OpenAI LLM model implementation.

    Concrete BaseAgentModel for OpenAI and OpenAI-compatible APIs. Async-first:
    a_stream_run() is the primary implementation; a_run() collects its stream
    into a single response, and a_run_with_semaphore() adds optional
    concurrency limiting on top of a_run().
    """

    def __init__(
        self,
        config: LLMModelConfig,
        **kwargs
    ):
        """
        Initialize the LLM model with an OpenAI provider.

        Args:
            config: LLMModelConfig instance specifying configuration.
            **kwargs: Additional parameters forwarded to BaseAgentModel.
        """
        super().__init__(config.model_name, **kwargs)

        self.config = config
        self.model_name = config.model_name

        # The provider owns the actual OpenAI SDK clients.
        self.provider = OpenAIProvider(config)

    def preprocess_messages(self, messages: List[Dict[str, str]]) -> List[Dict[str, str]]:
        """Preprocess messages using the provider's preprocessing hook."""
        return self.provider.preprocess_messages(messages)

    def _resolve_retry_settings(
        self,
        max_retries: Optional[int],
        retry_delay: Optional[float],
        timeout: Optional[float],
    ):
        """Fill in missing retry/timeout values from self.config (or safe defaults).

        Returns:
            (max_retries, retry_delay, timeout) with no None values.
        """
        config = getattr(self, 'config', None)
        if max_retries is None:
            max_retries = config.max_retries if config is not None else 3
        if retry_delay is None:
            retry_delay = config.retry_delay if config is not None else 20.0
        if timeout is None:
            # FIX: a_run() previously hard-coded 60s here instead of the
            # configured timeout, unlike a_stream_run().
            timeout = config.timeout if config is not None else 60.0
        return max_retries, retry_delay, timeout

    async def a_run_with_semaphore(
        self,
        messages: List[Dict[str, str]],
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        max_retries: Optional[int] = None,
        retry_delay: Optional[float] = None,
        timeout: Optional[float] = None,
        semaphore: Optional[asyncio.Semaphore] = None,
        **kwargs
    ) -> ModelResponse[str]:
        """
        Run a_run() under an optional concurrency-limiting semaphore.

        Best-effort wrapper intended for batched gather() use: failures are
        printed and None is returned instead of raising, so one failed call
        does not abort a whole batch.

        Args:
            messages: Chat messages with 'role' and 'content' keys.
            temperature: Sampling temperature (0.0 to 1.0).
            max_tokens: Maximum number of tokens to generate.
            max_retries: Retry attempts; defaults to config.max_retries.
            retry_delay: Seconds between retries; defaults to config.retry_delay.
            timeout: Per-attempt timeout; defaults to config.timeout.
            semaphore: Shared semaphore bounding concurrency; when None a
                private one-permit semaphore is used (no shared limit).
            **kwargs: Additional model-specific parameters.

        Returns:
            The ModelResponse, or None if the call ultimately failed.
        """
        # FIX: the original did `async with semaphore` even when the default
        # None was passed, which raised TypeError instead of running.
        if semaphore is None:
            semaphore = asyncio.Semaphore(1)
        async with semaphore:
            try:
                return await self.a_run(
                    messages,
                    temperature=temperature,
                    max_tokens=max_tokens,
                    max_retries=max_retries,
                    retry_delay=retry_delay,
                    timeout=timeout,
                    **kwargs
                )
            except Exception as e:
                # Deliberate best-effort: swallow, report, return None.
                print(f"Error: {e}")
                return None

    async def a_run(
        self,
        messages: List[Dict[str, str]],
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        verbose: bool = False,
        max_retries: Optional[int] = None,
        retry_delay: Optional[float] = None,
        timeout: Optional[float] = None,
        post_process_func: Optional[Callable[[str], str]] = None,
        **kwargs
    ) -> ModelResponse[str]:
        """
        Run the model asynchronously and return a complete response.

        Wraps a_stream_run(), collecting all streamed chunks into a single
        ModelResponse. Retries the whole stream-and-collect cycle on any
        exception.

        Args:
            messages: Chat messages with 'role' and 'content' keys.
            temperature: Sampling temperature (0.0 to 1.0).
            max_tokens: Maximum number of tokens to generate.
            verbose: When True, echo chunk content to stdout as it arrives.
            max_retries: Retry attempts; defaults to config.max_retries.
            retry_delay: Seconds between retries; defaults to config.retry_delay.
            timeout: Per-attempt timeout; defaults to config.timeout.
            post_process_func: Optional transform applied to the raw content
                and stored in ModelResponse.proc_response.
            **kwargs: Additional model-specific parameters.

        Returns:
            A ModelResponse containing the generated content.

        Raises:
            Exception: The last error, once all retries are exhausted.
        """
        max_retries, retry_delay, timeout = self._resolve_retry_settings(
            max_retries, retry_delay, timeout
        )

        # NOTE(review): a_stream_run() applies its own retry loop as well, so
        # failures may be retried at both levels — confirm this is intended.
        for attempt in range(max_retries + 1):
            try:
                stream = await self.a_stream_run(
                    messages=messages,
                    temperature=temperature,
                    max_tokens=max_tokens,
                    timeout=timeout,
                    **kwargs
                )

                # Accumulate reasoning and answer text separately.
                reasoning_content = ""
                full_content = ""
                raw_chunks = []

                async for chunk in stream:
                    if chunk.is_reasoning:
                        reasoning_content += chunk.content
                    else:
                        full_content += chunk.content
                    if chunk.raw_chunk is not None:
                        raw_chunks.append(chunk.raw_chunk)
                        if verbose:
                            print(chunk.content, end="", flush=True)

                proc_response = post_process_func(full_content) if post_process_func is not None else None

                return ModelResponse(
                    content=self.postprocess_response(full_content),
                    reasoning_content=reasoning_content,
                    model_name=self.model_name,
                    raw_response=raw_chunks if raw_chunks else None,
                    proc_response=proc_response
                )
            except Exception as e:
                if attempt < max_retries:
                    print(f"🔄 LLM API调用失败 (尝试 {attempt + 1}/{max_retries + 1}): {type(e).__name__}: {e}")
                    print(f"⏳ 等待 {retry_delay} 秒后重试...")
                    await asyncio.sleep(retry_delay)
                else:
                    print(f"❌ LLM API调用最终失败，已重试 {max_retries} 次: {type(e).__name__}: {e}")
                    raise

    async def a_stream_run(
        self,
        messages: List[Dict[str, str]],
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        max_retries: Optional[int] = None,
        retry_delay: Optional[float] = None,
        timeout: Optional[float] = None,
        **kwargs
    ) -> AsyncResponseStream[str]:
        """
        Run the model asynchronously and stream the response, with retries.

        This is the primary implementation that run/a_run/stream_run build on.
        Note: the timeout covers opening the stream only, not consuming it.

        Args:
            messages: Chat messages with 'role' and 'content' keys.
            temperature: Sampling temperature (0.0 to 1.0).
            max_tokens: Maximum number of tokens to generate.
            max_retries: Retry attempts; defaults to config.max_retries.
            retry_delay: Seconds between retries; defaults to config.retry_delay.
            timeout: Per-attempt timeout; defaults to config.timeout.
            **kwargs: Additional model-specific parameters.

        Returns:
            An AsyncResponseStream that yields chunks of the generated content.

        Raises:
            A retryable error once retries are exhausted, or any other error
            immediately.
        """
        max_retries, retry_delay, timeout = self._resolve_retry_settings(
            max_retries, retry_delay, timeout
        )

        # FIX: the original caught only asyncio/OS timeout and connection
        # errors, so its isinstance() checks for openai.APITimeoutError /
        # openai.APIConnectionError were dead code and those SDK errors were
        # never retried. Catching them here makes the retry actually apply.
        retryable_errors = (
            asyncio.TimeoutError,
            ConnectionError,
            TimeoutError,
            openai.APITimeoutError,
            openai.APIConnectionError,
        )

        for attempt in range(max_retries + 1):
            try:
                return await asyncio.wait_for(
                    self._internal_a_stream_run(messages, temperature, max_tokens, **kwargs),
                    timeout=timeout
                )
            except retryable_errors as e:
                if attempt < max_retries:
                    print(f"🔄 LLM API调用失败 (尝试 {attempt + 1}/{max_retries + 1}): {type(e).__name__}: {e}")
                    print(f"⏳ 等待 {retry_delay} 秒后重试...")
                    await asyncio.sleep(retry_delay)
                else:
                    print(f"❌ LLM API调用最终失败，已重试 {max_retries} 次: {type(e).__name__}: {e}")
                    raise

    async def _internal_a_stream_run(
        self,
        messages: List[Dict[str, str]],
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        **kwargs
    ) -> AsyncResponseStream[str]:
        """
        Internal async streaming implementation without retry logic.

        Args:
            messages: Chat messages with 'role' and 'content' keys.
            temperature: Sampling temperature (0.0 to 1.0).
            max_tokens: Maximum number of tokens to generate.
            **kwargs: Additional model-specific parameters.

        Returns:
            An AsyncResponseStream that yields chunks of the generated content.
        """
        stream = await self.provider.create_stream(messages, temperature, max_tokens, **kwargs)

        async def chunk_iterator() -> AsyncIterator[StreamingChunk[str]]:
            # Skip keep-alive chunks that carry no choices, then delegate
            # per-chunk parsing to the provider.
            async for raw in stream:
                if hasattr(raw, 'choices') and not raw.choices:
                    continue
                yield self.provider.process_chunk(raw)

        return AsyncResponseStream(
            iterator=chunk_iterator(),
            model_name=self.model_name
        )


# =============================================================================
# Global model instances
# =============================================================================

# Default LLM instance, created eagerly at import time from config.py.
GLOBAL_LLM = create_model()

# Thinking-model instance built from the cfg.llm_thinking section; falls back
# to the default model if that configuration is missing or invalid.
try:
    GLOBAL_THINKING_LLM = create_model(
        model_name=cfg.llm_thinking["model_name"],
        api_key=cfg.llm_thinking.get("api_key"),
        base_url=cfg.llm_thinking.get("base_url")
    )
except Exception as e:
    print(f"加载thinking模型失败，使用默认模型替代: {e}")
    GLOBAL_THINKING_LLM = GLOBAL_LLM

if __name__ == "__main__":
    # NOTE: asyncio is already imported at module level; the redundant
    # function-local re-import from the original has been removed.

    async def test_llm():
        """Smoke-test that the default LLM responds to a trivial prompt."""
        try:
            response = await GLOBAL_LLM.a_run(messages=[{"role": "user", "content": "你好"}])
            print(f"✅ LLM测试成功: {response.content[:50]}...")
        except Exception as e:
            print(f"❌ LLM测试失败: {e}")

    # Run the async smoke test.
    asyncio.run(test_llm())