"""OpenAI model adapter implementation."""

from typing import AsyncGenerator, Dict, Any, List, Optional
import asyncio
import tiktoken
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletionChunk

from .base import BaseModelAdapter, ModelConfig, ModelResponse


class OpenAIAdapter(BaseModelAdapter):
    """Adapter for OpenAI GPT models.

    Wraps the official ``AsyncOpenAI`` client behind the project's
    ``BaseModelAdapter`` interface: retried generation, streaming,
    tiktoken-based token counting, and USD cost estimation.
    """

    # Pricing per 1K tokens in USD (as of 2024).
    # NOTE(review): prices drift over time — verify against the current
    # OpenAI pricing page before trusting estimates.
    PRICING = {
        "gpt-4": {"prompt": 0.03, "completion": 0.06},
        "gpt-4-turbo": {"prompt": 0.01, "completion": 0.03},
        "gpt-3.5-turbo": {"prompt": 0.0015, "completion": 0.002},
        "gpt-3.5-turbo-16k": {"prompt": 0.003, "completion": 0.004}
    }

    def _initialize_client(self) -> None:
        """Initialize the async OpenAI client and the tiktoken tokenizer."""
        self._client = AsyncOpenAI(
            api_key=self.config.api_key,
            base_url=self.config.api_base,
            timeout=self.config.timeout,
            max_retries=0  # We handle retries ourselves via retry_with_backoff
        )

        # Initialize tiktoken for accurate token counting
        try:
            self._tokenizer = tiktoken.encoding_for_model(self.config.model_name)
        except KeyError:
            # Fallback to cl100k_base for models tiktoken doesn't know yet
            self._tokenizer = tiktoken.get_encoding("cl100k_base")

    def _build_params(
        self,
        messages: List[Dict[str, str]],
        stream: bool,
        **kwargs: Any
    ) -> Dict[str, Any]:
        """Build the chat.completions request payload.

        Precedence, lowest to highest: adapter config defaults, per-call
        ``kwargs``, then ``config.extra_params``. Note that extra_params
        deliberately wins over per-call kwargs (preserves the original
        merge order used by both generate() and stream()).

        Args:
            messages: Conversation messages
            stream: Whether to request a streaming response
            **kwargs: Per-call parameter overrides

        Returns:
            Keyword arguments for ``chat.completions.create``
        """
        params: Dict[str, Any] = {
            "model": self.config.model_name,
            "messages": messages,
            "temperature": self.config.temperature,
            "max_tokens": self.config.max_tokens,
            "top_p": self.config.top_p,
            "frequency_penalty": self.config.frequency_penalty,
            "presence_penalty": self.config.presence_penalty,
            "stream": stream
        }
        params.update(kwargs)
        params.update(self.config.extra_params)
        return params

    async def generate(
        self,
        messages: List[Dict[str, str]],
        **kwargs
    ) -> ModelResponse:
        """Generate a (non-streaming) response from the OpenAI model.

        Args:
            messages: Conversation messages
            **kwargs: Additional parameters for the API

        Returns:
            ModelResponse with generated content
        """
        params = self._build_params(messages, stream=False, **kwargs)

        # Use retry with backoff
        response = await self.retry_with_backoff(
            self._client.chat.completions.create,
            **params
        )

        # Parse response. The SDK types `usage` as optional, so fall back
        # to zeros rather than raising AttributeError on a missing field.
        choice = response.choices[0]
        if response.usage is not None:
            usage = {
                "prompt_tokens": response.usage.prompt_tokens,
                "completion_tokens": response.usage.completion_tokens,
                "total_tokens": response.usage.total_tokens
            }
        else:
            usage = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}

        return ModelResponse(
            content=choice.message.content or "",
            model=response.model,
            usage=usage,
            finish_reason=choice.finish_reason,
            metadata={
                "id": response.id,
                "system_fingerprint": response.system_fingerprint
            }
        )

    async def stream(
        self,
        messages: List[Dict[str, str]],
        **kwargs
    ) -> AsyncGenerator[str, None]:
        """Stream response content from the OpenAI model.

        Args:
            messages: Conversation messages
            **kwargs: Additional parameters

        Yields:
            Non-empty content chunks as they arrive
        """
        params = self._build_params(messages, stream=True, **kwargs)

        # Create stream with retry (retries the initial request only;
        # a failure mid-stream propagates to the caller)
        stream = await self.retry_with_backoff(
            self._client.chat.completions.create,
            **params
        )

        # Process stream; empty/None deltas (e.g. role-only chunks) are skipped
        async for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content

    def count_tokens(self, text: str) -> int:
        """Count tokens using tiktoken.

        Args:
            text: Input text

        Returns:
            Token count
        """
        return len(self._tokenizer.encode(text))

    def estimate_cost(self, prompt_tokens: int, completion_tokens: int) -> float:
        """Estimate cost in USD for the configured model.

        Args:
            prompt_tokens: Number of prompt tokens
            completion_tokens: Number of completion tokens

        Returns:
            Cost in USD
        """
        # Get pricing for model
        model_key = self.config.model_name
        if model_key not in self.PRICING:
            # Match the LONGEST known key first so that a variant such as
            # "gpt-4-turbo-preview" resolves to "gpt-4-turbo" pricing.
            # (A plain insertion-order scan matched "gpt-4" first and
            # overcharged turbo variants 3x on prompt tokens.)
            for key in sorted(self.PRICING, key=len, reverse=True):
                if key in model_key:
                    model_key = key
                    break
            else:
                # Default to GPT-3.5 pricing if the model is unknown
                model_key = "gpt-3.5-turbo"

        pricing = self.PRICING[model_key]
        prompt_cost = (prompt_tokens / 1000) * pricing["prompt"]
        completion_cost = (completion_tokens / 1000) * pricing["completion"]

        return prompt_cost + completion_cost

    async def health_check(self) -> bool:
        """Check OpenAI API health.

        Returns:
            True if the API is reachable and lists at least one model
        """
        try:
            # Use models endpoint for a lighter check than a completion call
            models = await self._client.models.list()
            return len(models.data) > 0
        except Exception:
            # Any failure (network, auth, timeout) means "unhealthy"
            return False