"""Anthropic Claude model adapter implementation."""

from typing import Any, AsyncGenerator, Dict, List, Tuple

import anthropic
from anthropic import AsyncAnthropic

from .base import BaseModelAdapter, ModelConfig, ModelResponse


class ClaudeAdapter(BaseModelAdapter):
    """Adapter for Anthropic Claude models.

    Converts OpenAI-style message lists to Anthropic's Messages API format
    (Claude takes the system prompt as a separate top-level parameter),
    delegates retries to the base class's ``retry_with_backoff``, and
    provides token-count and cost estimates.
    """

    # Pricing per 1M tokens in USD (Claude 3 as of 2024)
    PRICING = {
        "claude-3-opus": {"prompt": 15.0, "completion": 75.0},
        "claude-3-sonnet": {"prompt": 3.0, "completion": 15.0},
        "claude-3-haiku": {"prompt": 0.25, "completion": 1.25},
        "claude-2.1": {"prompt": 8.0, "completion": 24.0},
        "claude-2.0": {"prompt": 8.0, "completion": 24.0}
    }

    def _initialize_client(self) -> None:
        """Initialize the async Anthropic client from ``self.config``."""
        self._client = AsyncAnthropic(
            api_key=self.config.api_key,
            base_url=self.config.api_base,
            timeout=self.config.timeout,
            max_retries=0  # retry_with_backoff handles retries ourselves
        )

    def _convert_messages(
        self, messages: List[Dict[str, str]]
    ) -> Tuple[str, List[Dict[str, str]]]:
        """Convert OpenAI-style messages to Claude format.

        Multiple system messages are concatenated with blank lines rather
        than silently overwriting each other (the previous implementation
        kept only the last system message).

        Args:
            messages: OpenAI-style messages

        Returns:
            Tuple of (system_prompt, claude_messages)
        """
        system_parts: List[str] = []
        claude_messages: List[Dict[str, str]] = []

        for msg in messages:
            if msg["role"] == "system":
                system_parts.append(msg["content"])
            else:
                # Claude uses only "user" and "assistant" conversation roles
                claude_messages.append({
                    "role": msg["role"],
                    "content": msg["content"]
                })

        return "\n\n".join(system_parts), claude_messages

    def _build_params(
        self,
        messages: List[Dict[str, str]],
        stream: bool,
        **kwargs
    ) -> Dict[str, Any]:
        """Build the request payload shared by generate() and stream().

        Caller ``kwargs`` and ``config.extra_params`` may override the
        config defaults, but ``stream`` is pinned *after* the merge so an
        override cannot silently switch the response mode out from under
        the calling method.

        Args:
            messages: Conversation messages (OpenAI-style)
            stream: Whether to request a streaming response
            **kwargs: Caller overrides for request parameters

        Returns:
            Keyword arguments for ``client.messages.create``
        """
        system_prompt, claude_messages = self._convert_messages(messages)

        params: Dict[str, Any] = {
            "model": self.config.model_name,
            "messages": claude_messages,
            "max_tokens": self.config.max_tokens,
            "temperature": self.config.temperature,
            "top_p": self.config.top_p,
        }

        if system_prompt:
            params["system"] = system_prompt

        params.update(kwargs)
        params.update(self.config.extra_params)
        # Pin the response mode last so overrides cannot change it.
        params["stream"] = stream
        return params

    async def generate(
        self,
        messages: List[Dict[str, str]],
        **kwargs
    ) -> ModelResponse:
        """Generate a non-streaming response from a Claude model.

        Args:
            messages: Conversation messages
            **kwargs: Additional request parameters

        Returns:
            ModelResponse with generated content, usage, and metadata
        """
        params = self._build_params(messages, stream=False, **kwargs)

        # Use retry with backoff (client-level retries are disabled)
        response = await self.retry_with_backoff(
            self._client.messages.create,
            **params
        )

        usage = {
            "prompt_tokens": response.usage.input_tokens,
            "completion_tokens": response.usage.output_tokens,
            "total_tokens": response.usage.input_tokens + response.usage.output_tokens
        }

        # Claude returns a list of content blocks; join the text ones.
        content = "".join(
            block.text for block in response.content if block.type == "text"
        )

        return ModelResponse(
            content=content,
            model=response.model,
            usage=usage,
            finish_reason=response.stop_reason or "stop",
            metadata={
                "id": response.id,
                "stop_sequence": response.stop_sequence
            }
        )

    async def stream(
        self,
        messages: List[Dict[str, str]],
        **kwargs
    ) -> AsyncGenerator[str, None]:
        """Stream a response from a Claude model.

        Only the initial request creation is retried; once the stream has
        started, mid-stream errors propagate to the caller.

        Args:
            messages: Conversation messages
            **kwargs: Additional request parameters

        Yields:
            Text content chunks as they arrive
        """
        params = self._build_params(messages, stream=True, **kwargs)

        # Create stream with retry
        stream = await self.retry_with_backoff(
            self._client.messages.create,
            **params
        )

        # Only text deltas carry yieldable content; other event types
        # (message_start, content_block_start, ...) are skipped.
        async for event in stream:
            if event.type == "content_block_delta":
                if event.delta.type == "text_delta":
                    yield event.delta.text

    def count_tokens(self, text: str) -> int:
        """Count tokens for Claude models.

        Uses the rough heuristic of 1 token ≈ 4 characters; this is an
        estimate only, not the tokenizer's exact count.

        Args:
            text: Input text

        Returns:
            Estimated token count
        """
        return len(text) // 4

    def estimate_cost(self, prompt_tokens: int, completion_tokens: int) -> float:
        """Estimate cost in USD for Claude models.

        Matches the configured model name against PRICING by substring;
        unknown models fall back to Claude 3 Haiku pricing.

        Args:
            prompt_tokens: Number of prompt tokens
            completion_tokens: Number of completion tokens

        Returns:
            Cost in USD
        """
        model_key = self.config.model_name

        # Find the first pricing key contained in the model name.
        for key in self.PRICING:
            if key in model_key:
                model_key = key
                break
        else:
            # Default to Claude 3 Haiku if unknown
            model_key = "claude-3-haiku"

        pricing = self.PRICING[model_key]
        # Claude pricing is per 1M tokens
        prompt_cost = (prompt_tokens / 1_000_000) * pricing["prompt"]
        completion_cost = (completion_tokens / 1_000_000) * pricing["completion"]

        return prompt_cost + completion_cost