"""Base abstract model adapter interface."""

import asyncio
import logging
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from datetime import datetime, timezone
from enum import Enum
from typing import Any, AsyncGenerator, Dict, List, Optional


class ModelType(Enum):
    """Supported model types.

    Each member's value is the provider-facing model identifier string,
    suitable for use as ``model_name`` in a :class:`ModelConfig`.
    """
    GPT4 = "gpt-4"            # OpenAI GPT-4
    GPT35 = "gpt-3.5-turbo"   # OpenAI GPT-3.5 Turbo
    CLAUDE3 = "claude-3"      # Anthropic Claude 3
    ERNIE = "ernie-bot-4"     # Baidu ERNIE Bot 4
    SPARK = "spark"           # iFlytek Spark
    KIMI = "kimi"             # Moonshot Kimi
    QWEN = "qwen-max"         # Alibaba Qwen Max
    DEEPSEEK = "deepseek"     # DeepSeek
    DOUBAO = "doubao"         # ByteDance Doubao
    HUNYUAN = "hunyuan"       # Tencent Hunyuan


@dataclass
class ModelConfig:
    """Configuration for a model adapter.

    Fields are grouped into: connection/auth, sampling parameters,
    retry/timeout behavior, provider identification, rate limiting,
    versioning, and display metadata. Call :meth:`validate` to check
    the configuration before use.
    """

    # Connection / authentication
    api_key: Optional[str] = None
    api_base: Optional[str] = None
    model_name: str = ""

    # Sampling parameters
    temperature: float = 0.7
    max_tokens: int = 2000
    top_p: float = 1.0
    frequency_penalty: float = 0.0
    presence_penalty: float = 0.0

    # Request behavior
    timeout: int = 60
    max_retries: int = 3
    retry_delay: float = 1.0
    stream: bool = True

    # Provider identification
    provider: Optional[str] = None
    provider_config: Dict[str, Any] = field(default_factory=dict)

    # Model-specific configurations
    extra_params: Dict[str, Any] = field(default_factory=dict)

    # Rate limiting
    requests_per_minute: int = 60
    tokens_per_minute: int = 90000

    # Configuration versioning
    config_version: str = "1.0"
    last_updated: Optional[datetime] = None

    # Additional metadata
    id: Optional[str] = None
    name: Optional[str] = None
    description: Optional[str] = None
    enabled: bool = True

    # Compiled once at class-creation time; the previous code re-imported
    # `re` and recompiled this pattern on every validate() call.
    # Unannotated, so the dataclass machinery does not treat it as a field.
    _URL_RE = re.compile(
        r'^https?://'  # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|'  # domain
        r'localhost|'  # localhost
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)

    # Known per-provider API-key shapes. Formats drift over time, so a
    # mismatch only warns — it never fails validation.
    _KEY_PATTERNS = {
        "openai": r"^sk-[A-Za-z0-9]{48,}$",
        "anthropic": r"^sk-ant-[A-Za-z0-9]{40,}$",
        "deepseek": r"^sk-[A-Za-z0-9\-_]{32,}$",
    }

    def validate(self) -> bool:
        """Validate configuration requirements.

        Returns:
            True if the configuration is valid.

        Raises:
            ValueError: If ``model_name`` is empty, ``temperature`` is
                outside [0, 2], ``max_tokens`` is not positive, or
                ``api_base`` is set but not a well-formed URL.
        """
        if not self.model_name:
            raise ValueError("model_name is required")
        if self.temperature < 0 or self.temperature > 2:
            raise ValueError("temperature must be between 0 and 2")
        if self.max_tokens < 1:
            raise ValueError("max_tokens must be positive")

        # Validate URL format if provided
        if self.api_base and not self._URL_RE.match(self.api_base):
            raise ValueError(f"Invalid URL format: {self.api_base}")

        # Validate API key format if provider is known
        if self.api_key and self.provider:
            self._validate_api_key_format()

        return True

    def _validate_api_key_format(self) -> None:
        """Warn (never raise) when the API key doesn't match the provider's
        known key shape; unknown providers are skipped entirely."""
        if not self.api_key:
            return

        pattern = self._KEY_PATTERNS.get(self.provider)
        if pattern and not re.match(pattern, self.api_key):
            # Advisory only — key formats may change; use a module-scoped
            # logger with lazy %-formatting instead of the root logger.
            logging.getLogger(__name__).warning(
                "API key format may be invalid for provider %s", self.provider
            )


@dataclass
class ModelResponse:
    """Standardized response from model adapters.

    Attributes:
        content: Generated text returned by the model.
        model: Identifier of the model that produced the response.
        usage: Token accounting; convenience properties read the
            "total_tokens" / "prompt_tokens" / "completion_tokens" keys.
        metadata: Arbitrary extra information attached by the adapter.
        created_at: Timezone-aware UTC timestamp of response creation.
        finish_reason: Why generation stopped (defaults to "stop").
    """

    content: str
    model: str
    usage: Dict[str, int] = field(default_factory=dict)
    metadata: Dict[str, Any] = field(default_factory=dict)
    # datetime.utcnow() is deprecated (Python 3.12) and returns a naive
    # datetime; use an aware UTC timestamp instead.
    created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    finish_reason: str = "stop"

    @property
    def total_tokens(self) -> int:
        """Total tokens used (0 when no usage was reported)."""
        return self.usage.get("total_tokens", 0)

    @property
    def prompt_tokens(self) -> int:
        """Prompt tokens used (0 when no usage was reported)."""
        return self.usage.get("prompt_tokens", 0)

    @property
    def completion_tokens(self) -> int:
        """Completion tokens used (0 when no usage was reported)."""
        return self.usage.get("completion_tokens", 0)


class BaseModelAdapter(ABC):
    """Abstract base class for all model adapters.

    Concrete adapters implement provider-specific client setup, generation,
    streaming, token counting, and cost estimation. Shared behavior —
    health checking, model-info reporting, and retry with exponential
    backoff — lives here.
    """

    def __init__(self, config: ModelConfig):
        """Initialize adapter with configuration.

        Args:
            config: Model configuration; validated eagerly so a bad config
                fails at construction time rather than on first request.

        Raises:
            ValueError: If the configuration fails validation.
        """
        self.config = config
        self.config.validate()
        self._client = None
        self._initialize_client()

    @abstractmethod
    def _initialize_client(self) -> None:
        """Initialize the provider-specific client."""
        pass

    @abstractmethod
    async def generate(
        self,
        messages: List[Dict[str, str]],
        **kwargs
    ) -> ModelResponse:
        """Generate a response from the model.

        Args:
            messages: List of message dicts with 'role' and 'content'
            **kwargs: Additional model-specific parameters

        Returns:
            ModelResponse with generated content
        """
        pass

    @abstractmethod
    async def stream(
        self,
        messages: List[Dict[str, str]],
        **kwargs
    ) -> AsyncGenerator[str, None]:
        """Stream response from the model.

        Args:
            messages: List of message dicts with 'role' and 'content'
            **kwargs: Additional model-specific parameters

        Yields:
            String chunks of generated content
        """
        pass

    @abstractmethod
    def count_tokens(self, text: str) -> int:
        """Count tokens in text.

        Args:
            text: Input text to count

        Returns:
            Number of tokens
        """
        pass

    @abstractmethod
    def estimate_cost(self, prompt_tokens: int, completion_tokens: int) -> float:
        """Estimate cost for token usage.

        Args:
            prompt_tokens: Number of prompt tokens
            completion_tokens: Number of completion tokens

        Returns:
            Estimated cost in USD
        """
        pass

    async def health_check(self) -> bool:
        """Check if the model is accessible and healthy.

        Sends a tiny one-message request; any exception is deliberately
        treated as "unhealthy" rather than propagated.

        Returns:
            True if healthy, False otherwise
        """
        try:
            test_messages = [
                {"role": "user", "content": "Hi"}
            ]
            response = await self.generate(test_messages, max_tokens=5)
            return response.content is not None
        except Exception:
            return False

    def get_model_info(self) -> Dict[str, Any]:
        """Get model information and capabilities.

        Returns:
            Dictionary with model information
        """
        return {
            "model_name": self.config.model_name,
            "max_tokens": self.config.max_tokens,
            "temperature": self.config.temperature,
            "supports_streaming": self.config.stream,
            "timeout": self.config.timeout,
            "rate_limits": {
                "requests_per_minute": self.config.requests_per_minute,
                "tokens_per_minute": self.config.tokens_per_minute
            }
        }

    async def retry_with_backoff(
        self,
        func,
        *args,
        **kwargs
    ) -> Any:
        """Retry a function with exponential backoff.

        Args:
            func: Async function to retry
            *args: Function arguments
            **kwargs: Function keyword arguments

        Returns:
            Function result

        Raises:
            Last exception if all retries fail
        """
        last_exception: Optional[Exception] = None
        delay = self.config.retry_delay
        # Guard against max_retries < 1: the old code would skip the loop
        # entirely and execute `raise None` (a TypeError). Always make at
        # least one attempt.
        attempts = max(1, self.config.max_retries)

        for attempt in range(attempts):
            try:
                return await func(*args, **kwargs)
            except Exception as e:
                last_exception = e
                if attempt < attempts - 1:
                    await asyncio.sleep(delay)
                    delay *= 2  # Exponential backoff

        raise last_exception


# Usage Example Documentation
"""
Example usage of the BaseModelAdapter:

```python
from models.base import BaseModelAdapter, ModelConfig, ModelResponse

# Configure the adapter
config = ModelConfig(
    api_key="your-api-key",
    model_name="gpt-4",
    temperature=0.7,
    max_tokens=2000
)

# Initialize adapter (specific implementation)
adapter = OpenAIAdapter(config)

# Generate response (await must run inside an async function / event loop)
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello, how are you?"}
]

response = await adapter.generate(messages)
print(response.content)

# Stream response
async for chunk in adapter.stream(messages):
    print(chunk, end="")

# Count tokens
tokens = adapter.count_tokens("Hello world")
print(f"Token count: {tokens}")

# Estimate cost
cost = adapter.estimate_cost(prompt_tokens=100, completion_tokens=200)
print(f"Estimated cost: ${cost:.4f}")

# Health check
is_healthy = await adapter.health_check()
print(f"Model healthy: {is_healthy}")
```
"""