from abc import ABC, abstractmethod
from collections import defaultdict
from dataclasses import dataclass, asdict, replace
from enum import Enum
from typing import Dict, Any, List, Optional, AsyncGenerator, Union, Tuple
import asyncio
import hashlib
import json
import logging
import time

import httpx
import tiktoken

logger = logging.getLogger(__name__)


class ModelProvider(str, Enum):
    """Supported AI model providers.

    Subclasses ``str`` so that members compare equal to and serialize as
    their plain string values (e.g. in JSON payloads and stats dicts).
    """
    OPENAI = "openai"
    ANTHROPIC = "anthropic"
    GOOGLE = "google"
    

class ModelCapability(str, Enum):
    """AI model capabilities.

    Declarative feature tags attached to a ModelConfig; a ``str`` enum so
    members serialize as plain strings.
    """
    TEXT_GENERATION = "text_generation"
    CHAT_COMPLETION = "chat_completion"
    STREAMING = "streaming"
    FUNCTION_CALLING = "function_calling"
    VISION = "vision"


@dataclass
class ModelConfig:
    """Configuration for an AI model.

    Bundles generation parameters (sampling, penalties) with provider-side
    metadata (per-token cost, rate limits) consumed by the rate limiter and
    cost accounting.
    """
    provider: ModelProvider
    model_name: str
    max_tokens: int = 4096
    temperature: float = 0.7
    top_p: float = 1.0
    frequency_penalty: float = 0.0
    presence_penalty: float = 0.0
    # Optional because a mutable list default is not allowed on a dataclass
    # field; None means "use the default set" (filled in __post_init__).
    capabilities: Optional[List[ModelCapability]] = None
    cost_per_token: float = 0.0  # flat rate applied to input AND output tokens
    rate_limit_rpm: int = 60  # requests per minute
    rate_limit_tpm: int = 40000  # tokens per minute
    
    def __post_init__(self) -> None:
        # Default capability set shared by every provider in this module.
        if self.capabilities is None:
            self.capabilities = [ModelCapability.TEXT_GENERATION, ModelCapability.CHAT_COMPLETION]


@dataclass
class AIRequest:
    """AI service request data.

    Wraps chat messages with the selected model configuration and optional
    per-request overrides (max_tokens, temperature, system prompt, functions).
    """
    messages: List[Dict[str, str]]
    model_config: ModelConfig
    stream: bool = False
    max_tokens: Optional[int] = None
    temperature: Optional[float] = None
    system_prompt: Optional[str] = None
    functions: Optional[List[Dict[str, Any]]] = None
    
    def to_cache_key(self) -> str:
        """Generate a deterministic cache key for this request.

        Keys on the *effective* parameters (override if given, else config
        default). MD5 is acceptable here: the digest is a cache index, not a
        security token.
        """
        # BUG FIX: use explicit None checks rather than `or`, so that a
        # falsy-but-valid override (e.g. temperature=0.0) produces a distinct
        # cache key instead of silently collapsing onto the config default.
        effective_max_tokens = self.max_tokens if self.max_tokens is not None else self.model_config.max_tokens
        effective_temperature = self.temperature if self.temperature is not None else self.model_config.temperature
        data = {
            'messages': self.messages,
            'model': self.model_config.model_name,
            'max_tokens': effective_max_tokens,
            'temperature': effective_temperature,
            'system_prompt': self.system_prompt,
            'functions': self.functions
        }
        return hashlib.md5(json.dumps(data, sort_keys=True).encode()).hexdigest()


@dataclass
class AIResponse:
    """AI service response data."""
    content: str
    model: str
    provider: ModelProvider
    usage: Dict[str, int]
    cached: bool = False
    response_time: float = 0.0
    cost: float = 0.0
    
    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary."""
        return asdict(self)


class AIProvider(ABC):
    """Abstract base class for AI providers.

    Subclasses implement client setup plus streaming and non-streaming
    generation; token counting and cost estimation are shared here.
    """

    def __init__(self, api_key: str, model_config: ModelConfig):
        self.api_key = api_key
        self.model_config = model_config
        # Concrete client is created by the subclass hook below.
        self.client = None
        self._initialize_client()

    @abstractmethod
    def _initialize_client(self):
        """Initialize the API client."""

    @abstractmethod
    async def generate(self, request: AIRequest) -> AIResponse:
        """Generate response from AI model."""

    @abstractmethod
    async def generate_stream(self, request: AIRequest) -> AsyncGenerator[str, None]:
        """Generate streaming response from AI model."""

    def count_tokens(self, text: str) -> int:
        """Count tokens in *text*, falling back to a rough estimate."""
        try:
            return len(tiktoken.get_encoding("cl100k_base").encode(text))
        except Exception as exc:
            logger.warning(f"Token counting failed: {exc}, using estimation")
            # Heuristic: English text averages roughly 4 characters per token.
            return len(text) // 4

    def calculate_cost(self, input_tokens: int, output_tokens: int) -> float:
        """Calculate API call cost at the configured flat per-token rate."""
        total_tokens = input_tokens + output_tokens
        return total_tokens * self.model_config.cost_per_token


class OpenAIProvider(AIProvider):
    """OpenAI API provider.

    Also works against OpenAI-compatible endpoints when ``base_url`` is set.
    """
    
    def __init__(self, api_key: str, model_config: ModelConfig, base_url: Optional[str] = None):
        # base_url must be assigned before super().__init__, which calls
        # _initialize_client() and reads it.
        self.base_url = base_url
        super().__init__(api_key, model_config)
    
    def _initialize_client(self):
        """Initialize OpenAI async client (optionally with a custom base URL)."""
        try:
            import openai
            client_kwargs = {"api_key": self.api_key}
            if self.base_url:
                client_kwargs["base_url"] = self.base_url
            self.client = openai.AsyncOpenAI(**client_kwargs)
        except ImportError:
            raise ImportError("OpenAI package not installed. Run: pip install openai")
            
    async def generate(self, request: AIRequest) -> AIResponse:
        """Generate a non-streaming response using the OpenAI API.

        Raises:
            Exception: re-raises any client/API error after logging it.
        """
        start_time = time.time()
        
        try:
            messages = request.messages.copy()
            if request.system_prompt:
                messages.insert(0, {"role": "system", "content": request.system_prompt})
                
            # BUG FIX: explicit None checks instead of `or`, so a valid
            # falsy override (temperature=0.0) is not silently replaced by
            # the config default.
            params = {
                "model": self.model_config.model_name,
                "messages": messages,
                "max_tokens": request.max_tokens if request.max_tokens is not None else self.model_config.max_tokens,
                "temperature": request.temperature if request.temperature is not None else self.model_config.temperature,
                "top_p": self.model_config.top_p,
                "frequency_penalty": self.model_config.frequency_penalty,
                "presence_penalty": self.model_config.presence_penalty,
            }
            
            if request.functions:
                params["functions"] = request.functions
                params["function_call"] = "auto"
                
            response = await self.client.chat.completions.create(**params)
            
            usage = {
                "prompt_tokens": response.usage.prompt_tokens,
                "completion_tokens": response.usage.completion_tokens,
                "total_tokens": response.usage.total_tokens
            }
            
            cost = self.calculate_cost(usage["prompt_tokens"], usage["completion_tokens"])
            response_time = time.time() - start_time
            
            return AIResponse(
                content=response.choices[0].message.content,
                model=self.model_config.model_name,
                provider=ModelProvider.OPENAI,
                usage=usage,
                response_time=response_time,
                cost=cost
            )
            
        except Exception as e:
            logger.error(f"OpenAI API error: {e}")
            raise
            
    async def generate_stream(self, request: AIRequest) -> AsyncGenerator[str, None]:
        """Yield response text chunks using the OpenAI streaming API."""
        try:
            messages = request.messages.copy()
            if request.system_prompt:
                messages.insert(0, {"role": "system", "content": request.system_prompt})
                
            params = {
                "model": self.model_config.model_name,
                "messages": messages,
                # Same None-check fix as generate(): honor explicit 0 values.
                "max_tokens": request.max_tokens if request.max_tokens is not None else self.model_config.max_tokens,
                "temperature": request.temperature if request.temperature is not None else self.model_config.temperature,
                "stream": True
            }
            
            stream = await self.client.chat.completions.create(**params)
            async for chunk in stream:
                if chunk.choices[0].delta.content:
                    yield chunk.choices[0].delta.content
                    
        except Exception as e:
            logger.error(f"OpenAI streaming error: {e}")
            raise


class AnthropicProvider(AIProvider):
    """Anthropic Claude API provider."""
    
    def _initialize_client(self):
        """Initialize Anthropic async client."""
        try:
            import anthropic
            self.client = anthropic.AsyncAnthropic(api_key=self.api_key)
        except ImportError:
            raise ImportError("Anthropic package not installed. Run: pip install anthropic")
            
    async def generate(self, request: AIRequest) -> AIResponse:
        """Generate a non-streaming response using the Anthropic API.

        Raises:
            Exception: re-raises any client/API error after logging it.
        """
        start_time = time.time()
        
        try:
            messages = request.messages.copy()
            system_prompt = request.system_prompt or ""
            
            # Anthropic takes the system prompt as a separate argument:
            # strip a leading OpenAI-style system message if present
            # (it takes precedence over request.system_prompt).
            if messages and messages[0]["role"] == "system":
                system_prompt = messages[0]["content"]
                messages = messages[1:]
                
            # BUG FIX: explicit None checks instead of `or`, so a valid
            # temperature=0.0 override is not replaced by the config default.
            response = await self.client.messages.create(
                model=self.model_config.model_name,
                messages=messages,
                max_tokens=request.max_tokens if request.max_tokens is not None else self.model_config.max_tokens,
                temperature=request.temperature if request.temperature is not None else self.model_config.temperature,
                system=system_prompt
            )
            
            # Anthropic reports input/output token counts; normalize to the
            # OpenAI-style keys used by AIResponse.usage.
            usage = {
                "prompt_tokens": response.usage.input_tokens,
                "completion_tokens": response.usage.output_tokens,
                "total_tokens": response.usage.input_tokens + response.usage.output_tokens
            }
            
            cost = self.calculate_cost(usage["prompt_tokens"], usage["completion_tokens"])
            response_time = time.time() - start_time
            
            return AIResponse(
                content=response.content[0].text,
                model=self.model_config.model_name,
                provider=ModelProvider.ANTHROPIC,
                usage=usage,
                response_time=response_time,
                cost=cost
            )
            
        except Exception as e:
            logger.error(f"Anthropic API error: {e}")
            raise
            
    async def generate_stream(self, request: AIRequest) -> AsyncGenerator[str, None]:
        """Yield response text chunks using the Anthropic streaming API."""
        try:
            messages = request.messages.copy()
            system_prompt = request.system_prompt or ""
            
            if messages and messages[0]["role"] == "system":
                system_prompt = messages[0]["content"]
                messages = messages[1:]
                
            stream = await self.client.messages.create(
                model=self.model_config.model_name,
                messages=messages,
                # Same None-check fix as generate().
                max_tokens=request.max_tokens if request.max_tokens is not None else self.model_config.max_tokens,
                temperature=request.temperature if request.temperature is not None else self.model_config.temperature,
                system=system_prompt,
                stream=True
            )
            
            async for chunk in stream:
                # Only text deltas carry content; other event types
                # (message_start, message_delta, ...) are skipped.
                if chunk.type == "content_block_delta":
                    if hasattr(chunk.delta, 'text'):
                        yield chunk.delta.text
                        
        except Exception as e:
            logger.error(f"Anthropic streaming error: {e}")
            raise


class GoogleProvider(AIProvider):
    """Google Gemini API provider (raw REST calls via httpx)."""
    
    def _initialize_client(self):
        """Initialize an httpx client for the Gemini REST API."""
        # Using httpx for Google API calls since official client might not be available.
        # NOTE(review): this AsyncClient is never closed; if providers ever
        # gain a shutdown hook, call `await self.client.aclose()` there.
        self.client = httpx.AsyncClient()
        self.base_url = "https://generativelanguage.googleapis.com/v1beta"
        
    async def generate(self, request: AIRequest) -> AIResponse:
        """Generate a response via the Gemini ``generateContent`` endpoint.

        Raises:
            Exception: on HTTP errors or when the response has no candidates.
        """
        start_time = time.time()
        
        try:
            # Convert messages to Gemini format. Gemini only accepts "user"
            # and "model" roles, so anything that is not "user" (assistant,
            # and also a stray "system" message) is mapped to "model".
            contents = []
            for msg in request.messages:
                role = "user" if msg["role"] == "user" else "model"
                contents.append({
                    "role": role,
                    "parts": [{"text": msg["content"]}]
                })
                
            # BUG FIX: explicit None checks instead of `or`, so a valid
            # temperature=0.0 override is not replaced by the config default.
            payload = {
                "contents": contents,
                "generationConfig": {
                    "temperature": request.temperature if request.temperature is not None else self.model_config.temperature,
                    "maxOutputTokens": request.max_tokens if request.max_tokens is not None else self.model_config.max_tokens,
                    "topP": self.model_config.top_p,
                }
            }
            
            if request.system_prompt:
                payload["systemInstruction"] = {
                    "parts": [{"text": request.system_prompt}]
                }
                
            url = f"{self.base_url}/models/{self.model_config.model_name}:generateContent"
            
            # API key goes in the query string per the Gemini REST convention.
            response = await self.client.post(
                url,
                json=payload,
                headers={"Content-Type": "application/json"},
                params={"key": self.api_key}
            )
            response.raise_for_status()
            
            result = response.json()
            
            if "candidates" in result and result["candidates"]:
                content = result["candidates"][0]["content"]["parts"][0]["text"]
            else:
                raise Exception("No content in response")
                
            # Estimate token usage (Google doesn't always provide this)
            prompt_tokens = sum(self.count_tokens(msg["content"]) for msg in request.messages)
            completion_tokens = self.count_tokens(content)
            
            usage = {
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": prompt_tokens + completion_tokens
            }
            
            cost = self.calculate_cost(usage["prompt_tokens"], usage["completion_tokens"])
            response_time = time.time() - start_time
            
            return AIResponse(
                content=content,
                model=self.model_config.model_name,
                provider=ModelProvider.GOOGLE,
                usage=usage,
                response_time=response_time,
                cost=cost
            )
            
        except Exception as e:
            logger.error(f"Google API error: {e}")
            raise
            
    async def generate_stream(self, request: AIRequest) -> AsyncGenerator[str, None]:
        """Generate streaming response using Google Gemini API.

        Not a true stream yet: falls back to a single generate() call and
        yields the whole text once (the ``streamGenerateContent`` endpoint
        would be the real implementation).
        """
        response = await self.generate(request)
        yield response.content


class RateLimiter:
    """Sliding-window (60 s) rate limiter for API calls.

    Tracks per-provider request timestamps and token counts and sleeps the
    caller when either the rpm or tpm budget from ModelConfig is exceeded.
    """
    
    def __init__(self):
        self.requests = defaultdict(list)  # provider -> [timestamps]
        self.tokens = defaultdict(list)    # provider -> [(timestamp, tokens)]
        
    async def check_rate_limit(self, provider: str, model_config: ModelConfig, estimated_tokens: int = 0):
        """Block (via asyncio.sleep) until the request fits the limits, then record it."""
        current_time = time.time()
        
        # Drop window entries older than 60 seconds.
        self._clean_old_entries(provider, current_time)
        
        # Request-count (rpm) limit: wait until the oldest entry ages out.
        if len(self.requests[provider]) >= model_config.rate_limit_rpm:
            wait_time = 60 - (current_time - self.requests[provider][0])
            if wait_time > 0:
                logger.warning(f"Rate limit hit for {provider}, waiting {wait_time:.2f}s")
                await asyncio.sleep(wait_time)
                
        # Token (tpm) limit.
        total_tokens = sum(tokens for _, tokens in self.tokens[provider])
        if total_tokens + estimated_tokens > model_config.rate_limit_tpm:
            # BUG FIX: the token window can be empty while still exceeding the
            # budget (a single oversized request); indexing [0][0] would then
            # raise IndexError. With an empty window there is nothing to wait
            # out, so only wait when entries exist.
            if self.tokens[provider]:
                wait_time = 60 - (current_time - self.tokens[provider][0][0])
                if wait_time > 0:
                    logger.warning(f"Token rate limit hit for {provider}, waiting {wait_time:.2f}s")
                    await asyncio.sleep(wait_time)
                
        # Record this request. BUG FIX: take a fresh timestamp in case we
        # slept above, so window entries reflect when the call actually runs.
        record_time = time.time()
        self.requests[provider].append(record_time)
        if estimated_tokens > 0:
            self.tokens[provider].append((record_time, estimated_tokens))
            
    def _clean_old_entries(self, provider: str, current_time: float):
        """Remove entries older than 1 minute from both windows."""
        cutoff = current_time - 60
        self.requests[provider] = [t for t in self.requests[provider] if t > cutoff]
        self.tokens[provider] = [(t, tokens) for t, tokens in self.tokens[provider] if t > cutoff]


class AIService:
    """Main AI service with load balancing, caching, rate limiting and failover.

    Providers are registered by name via register_provider(); generate()
    routes a request to one of them, serving repeated identical non-streaming
    requests from an in-memory TTL cache.
    """
    
    def __init__(self):
        self.providers: Dict[str, AIProvider] = {}
        self.model_configs: Dict[str, ModelConfig] = {}
        self.rate_limiter = RateLimiter()
        self.cache: Dict[str, Tuple[AIResponse, float]] = {}  # cache_key -> (response, timestamp)
        self.cache_ttl = 3600  # seconds (1 hour)
        self.failover_enabled = True
        self.load_balancing_enabled = True
        
    def register_provider(self, name: str, provider: AIProvider, model_config: ModelConfig):
        """Register an AI provider under *name* (overwrites an existing entry)."""
        self.providers[name] = provider
        self.model_configs[name] = model_config
        logger.info(f"Registered AI provider: {name}")
        
    def set_cache_ttl(self, ttl: int):
        """Set cache TTL in seconds."""
        self.cache_ttl = ttl
        
    async def generate(self, 
                      messages: List[Dict[str, str]], 
                      preferred_provider: Optional[str] = None,
                      stream: bool = False,
                      **kwargs) -> Union[AIResponse, AsyncGenerator[str, None]]:
        """Generate an AI response with intelligent routing.

        Args:
            messages: Chat messages in OpenAI format ({"role": ..., "content": ...}).
            preferred_provider: Registered provider name to try first.
            stream: If True, return an async generator of text chunks.
                Streaming bypasses the cache, and failover cannot cover
                errors raised while the caller iterates the generator.
            **kwargs: Extra AIRequest fields (max_tokens, temperature,
                system_prompt, functions).

        Raises:
            Exception: if no provider is registered or every provider fails.
        """
        provider_name = await self._select_provider(preferred_provider, messages)
        if not provider_name:
            raise Exception("No available AI providers")
            
        provider = self.providers[provider_name]
        model_config = self.model_configs[provider_name]
        
        request = AIRequest(
            messages=messages,
            model_config=model_config,
            stream=stream,
            **kwargs
        )
        
        # Serve non-streaming requests from the cache when possible.
        if not stream:
            cached_response = self._get_cached_response(request)
            if cached_response:
                logger.info(f"Cache hit for request to {provider_name}")
                return cached_response
                
        # Estimate input tokens for rate limiting (output is unknown here).
        estimated_tokens = sum(provider.count_tokens(msg["content"]) for msg in messages)
        await self.rate_limiter.check_rate_limit(provider_name, model_config, estimated_tokens)
        
        try:
            if stream:
                # Returned un-awaited: iteration (and any API errors during
                # it) happens at the call site, outside this try block.
                return provider.generate_stream(request)
            else:
                response = await provider.generate(request)
                # Cache successful response
                self._cache_response(request, response)
                logger.info(f"Generated response using {provider_name} in {response.response_time:.2f}s")
                return response
                
        except Exception as e:
            logger.error(f"Provider {provider_name} failed: {e}")
            
            # Try failover if enabled
            if self.failover_enabled and len(self.providers) > 1:
                return await self._try_failover(request, exclude=[provider_name])
            raise
            
    async def _select_provider(self, preferred: Optional[str], messages: List[Dict[str, str]]) -> Optional[str]:
        """Select the best available provider (preferred name wins if registered)."""
        available_providers = list(self.providers.keys())
        
        if preferred and preferred in available_providers:
            return preferred
            
        if not available_providers:
            return None
            
        # Simple load balancing based on recent response times
        if self.load_balancing_enabled and len(available_providers) > 1:
            # For now, just return the first available provider
            # In production, implement smart load balancing based on:
            # - Current load
            # - Average response time
            # - Error rates
            # - Cost optimization
            pass
            
        return available_providers[0]
        
    async def _try_failover(self, request: AIRequest, exclude: List[str]) -> AIResponse:
        """Try remaining providers in registration order until one succeeds.

        Raises:
            Exception: when every remaining provider also fails.
        """
        available = [name for name in self.providers.keys() if name not in exclude]
        
        for provider_name in available:
            try:
                provider = self.providers[provider_name]
                # Point the request at the fallback provider's model config.
                request.model_config = self.model_configs[provider_name]
                
                response = await provider.generate(request)
                logger.info(f"Failover successful to {provider_name}")
                return response
                
            except Exception as e:
                logger.warning(f"Failover to {provider_name} also failed: {e}")
                continue
                
        raise Exception("All providers failed")
        
    def _get_cached_response(self, request: AIRequest) -> Optional[AIResponse]:
        """Get cached response if available and not expired."""
        cache_key = request.to_cache_key()
        
        if cache_key in self.cache:
            response, timestamp = self.cache[cache_key]
            if time.time() - timestamp < self.cache_ttl:
                # BUG FIX: hand the caller a copy flagged as cached rather
                # than the stored object, so caller-side mutations cannot
                # corrupt the cache entry shared by future hits.
                return replace(response, cached=True)
            else:
                # Remove expired entry
                del self.cache[cache_key]
                
        return None
        
    def _cache_response(self, request: AIRequest, response: AIResponse):
        """Cache the response, evicting the oldest entries past 1000."""
        cache_key = request.to_cache_key()
        self.cache[cache_key] = (response, time.time())
        
        # Simple cache size management
        if len(self.cache) > 1000:
            # Remove oldest entries
            sorted_cache = sorted(self.cache.items(), key=lambda x: x[1][1])
            for key, _ in sorted_cache[:100]:  # Remove oldest 100 entries
                del self.cache[key]
                
    def get_stats(self) -> Dict[str, Any]:
        """Get service statistics (hit-rate/request/cost counters are stubs)."""
        stats = {
            "providers": list(self.providers.keys()),
            "cache_size": len(self.cache),
            "cache_hit_rate": 0.0,  # Would implement proper tracking
            "total_requests": 0,    # Would implement proper tracking
            "total_cost": 0.0,      # Would implement proper tracking
        }
        
        # Provider-specific stats
        provider_stats = {}
        for name, config in self.model_configs.items():
            provider_stats[name] = {
                "model": config.model_name,
                "provider": config.provider,
                "rate_limits": {
                    "rpm": config.rate_limit_rpm,
                    "tpm": config.rate_limit_tpm
                }
            }
            
        stats["provider_details"] = provider_stats
        return stats


# Global service instance
# Module-level singleton; populate it by calling initialize_ai_service()
# before using ai_service.generate(...).
ai_service = AIService()


async def initialize_ai_service(openai_api_key: Optional[str] = None, 
                                openai_base_url: Optional[str] = None,
                                anthropic_api_key: Optional[str] = None,
                                google_api_key: Optional[str] = None) -> bool:
    """Initialize the global ai_service with whichever providers have keys.

    Each provider is registered independently; a failure to set one up is
    logged and does not prevent the others from registering.

    Returns:
        True if at least one provider was registered successfully.
    """
    success_count = 0
    
    # OpenAI(-compatible) provider
    if openai_api_key:
        try:
            openai_config = ModelConfig(
                provider=ModelProvider.OPENAI,
                # NOTE(review): "qwen-max-latest" is not an OpenAI model name;
                # presumably it is served through an OpenAI-compatible endpoint
                # supplied via openai_base_url -- confirm this is intentional.
                model_name="qwen-max-latest",
                max_tokens=4096,
                temperature=0.7,
                cost_per_token=0.000015,  # Estimated cost per token
                rate_limit_rpm=3500,
                rate_limit_tpm=200000,
                capabilities=[
                    ModelCapability.TEXT_GENERATION,
                    ModelCapability.CHAT_COMPLETION,
                    ModelCapability.STREAMING,
                    ModelCapability.FUNCTION_CALLING
                ]
            )
            
            openai_provider = OpenAIProvider(openai_api_key, openai_config, openai_base_url)
            ai_service.register_provider("openai", openai_provider, openai_config)
            success_count += 1
            
        except Exception as e:
            logger.error(f"Failed to initialize OpenAI provider: {e}")
            
    # Anthropic provider
    if anthropic_api_key:
        try:
            anthropic_config = ModelConfig(
                provider=ModelProvider.ANTHROPIC,
                model_name="claude-3-haiku-20240307",
                max_tokens=4096,
                temperature=0.7,
                cost_per_token=0.000025,  # Estimated cost per token
                rate_limit_rpm=1000,
                rate_limit_tpm=100000,
                capabilities=[
                    ModelCapability.TEXT_GENERATION,
                    ModelCapability.CHAT_COMPLETION,
                    ModelCapability.STREAMING
                ]
            )
            
            anthropic_provider = AnthropicProvider(anthropic_api_key, anthropic_config)
            ai_service.register_provider("anthropic", anthropic_provider, anthropic_config)
            success_count += 1
            
        except Exception as e:
            logger.error(f"Failed to initialize Anthropic provider: {e}")
            
    # Google provider
    if google_api_key:
        try:
            google_config = ModelConfig(
                provider=ModelProvider.GOOGLE,
                model_name="gemini-1.5-flash",
                max_tokens=8192,
                temperature=0.7,
                cost_per_token=0.00001,  # Estimated cost per token
                rate_limit_rpm=1000,
                rate_limit_tpm=120000,
                capabilities=[
                    ModelCapability.TEXT_GENERATION,
                    ModelCapability.CHAT_COMPLETION,
                    ModelCapability.VISION
                ]
            )
            
            google_provider = GoogleProvider(google_api_key, google_config)
            ai_service.register_provider("google", google_provider, google_config)
            success_count += 1
            
        except Exception as e:
            logger.error(f"Failed to initialize Google provider: {e}")
            
    logger.info(f"AI service initialized with {success_count} providers")
    return success_count > 0