"""Embedding service with multiple provider support including Ollama."""

import asyncio
import os
from typing import List, Optional, Dict, Any
import logging
import httpx
from abc import ABC, abstractmethod

logger = logging.getLogger(__name__)


class EmbeddingProvider(ABC):
    """Common contract and shared settings for embedding backends.

    Concrete providers implement :meth:`embed` and :meth:`health_check`;
    the base class only extracts configuration shared by all of them.
    """

    def __init__(self, config: Dict[str, Any]):
        """Read shared provider settings out of *config*.

        Recognized keys: ``model``, ``vector_dimension`` (default 768),
        ``timeout`` seconds (default 30), ``max_retries`` (default 3).
        """
        self.config = config
        cfg = config.get
        self.model = cfg('model')
        self.vector_dimension = cfg('vector_dimension', 768)
        self.timeout = cfg('timeout', 30)
        self.max_retries = cfg('max_retries', 3)

    @abstractmethod
    async def embed(self, texts: List[str]) -> List[List[float]]:
        """Return one embedding vector per input text, in order."""
        ...

    @abstractmethod
    async def health_check(self) -> bool:
        """Return True when the backing service is reachable and usable."""
        ...


class OllamaProvider(EmbeddingProvider):
    """Ollama embedding provider for local models.

    Sends one request per input text to the server's ``/api/embeddings``
    endpoint, retrying each text up to ``max_retries`` times with
    exponential backoff. Failed or empty responses are replaced with a
    zero vector so the output always aligns 1:1 with the input.
    """

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        self.base_url = config.get('base_url', 'http://localhost:11434')
        self.api_key = config.get('api_key', '')
        # A plain local Ollama server needs no auth, so the Bearer header
        # is only attached when an API key was actually configured.
        self.client = httpx.AsyncClient(
            base_url=self.base_url,
            timeout=self.timeout,
            headers={"Authorization": f"Bearer {self.api_key}"} if self.api_key else {}
        )

    async def embed(self, texts: List[str]) -> List[List[float]]:
        """Generate embeddings using Ollama.

        Returns one vector per input text, in order. Texts that fail after
        all retries (or come back empty) yield a zero vector of
        ``self.vector_dimension``.
        """
        return [await self._embed_one(text) for text in texts]

    async def _embed_one(self, text: str) -> List[float]:
        """Embed a single text with retries; zero vector after final failure."""
        for attempt in range(self.max_retries):
            try:
                response = await self.client.post(
                    '/api/embeddings',
                    json={
                        'model': self.model,
                        'prompt': text,
                        'options': {
                            'temperature': 0.0,  # Deterministic embeddings
                            'seed': 42  # Consistent embeddings
                        }
                    }
                )

                if response.status_code == 200:
                    embedding = response.json().get('embedding', [])
                    if embedding:
                        return embedding
                    # The server answered successfully but with no vector;
                    # retrying would likely repeat the result, so fall back
                    # to zeros immediately.
                    logger.warning(f"Empty embedding received for text: {text[:50]}...")
                    return [0.0] * self.vector_dimension

                logger.error(f"Ollama API error: {response.status_code} - {response.text}")
            except Exception as e:
                logger.error(f"Ollama embedding error (attempt {attempt + 1}): {e}")

            if attempt < self.max_retries - 1:
                await asyncio.sleep(2 ** attempt)  # Exponential backoff

        return [0.0] * self.vector_dimension

    async def health_check(self) -> bool:
        """Check Ollama service health and that the configured model is present."""
        try:
            response = await self.client.get('/api/tags')
            if response.status_code != 200:
                logger.error(f"Ollama health check failed: {response.status_code}")
                return False

            # Check if our model is available
            models = response.json().get('models', [])
            available_models = [m.get('name', '') for m in models]
            # NOTE(review): Ollama typically reports tag-suffixed names
            # (e.g. "nomic-embed-text:latest"); this exact match can miss
            # when self.model omits the tag -- confirm configured names.
            if self.model in available_models:
                logger.info(f"Ollama model {self.model} is available")
                return True

            logger.warning(f"Ollama model {self.model} not found. Available: {available_models}")
            return False
        except Exception as e:
            logger.error(f"Ollama health check error: {e}")
            return False

    async def close(self):
        """Close the underlying HTTP client."""
        await self.client.aclose()


class OpenAIProvider(EmbeddingProvider):
    """OpenAI embedding provider.

    Batches texts through the ``/embeddings`` endpoint, retrying each
    batch up to ``max_retries`` times with exponential backoff. A batch
    that fails after all retries is replaced by zero vectors so output
    length always matches input length.
    """

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        self.api_key = config.get('api_key')
        self.base_url = config.get('base_url', 'https://api.openai.com/v1')
        self.client = httpx.AsyncClient(
            base_url=self.base_url,
            timeout=self.timeout,
            headers={"Authorization": f"Bearer {self.api_key}"}
        )

    async def embed(self, texts: List[str]) -> List[List[float]]:
        """Generate embeddings using OpenAI.

        Returns one vector per input text, in order; an empty input list
        yields an empty result without any API call.
        """
        # BUGFIX: with no texts, min(batch_size, 0) == 0 and
        # range(0, 0, 0) raises "range() arg 3 must not be zero".
        if not texts:
            return []

        embeddings = []

        # Process in batches; clamp to >= 1 so a misconfigured
        # non-positive batch_size cannot crash the range step either.
        batch_size = max(1, min(self.config.get('batch_size', 10), len(texts)))

        for i in range(0, len(texts), batch_size):
            batch = texts[i:i + batch_size]

            for attempt in range(self.max_retries):
                try:
                    response = await self.client.post(
                        '/embeddings',
                        json={
                            'model': self.model,
                            'input': batch
                        }
                    )

                    if response.status_code == 200:
                        result = response.json()
                        batch_embeddings = [item['embedding'] for item in result['data']]
                        embeddings.extend(batch_embeddings)
                        break
                    else:
                        logger.error(f"OpenAI API error: {response.status_code} - {response.text}")
                        if attempt == self.max_retries - 1:
                            # Add zero embeddings for failed batch
                            embeddings.extend([[0.0] * self.vector_dimension for _ in batch])
                        else:
                            await asyncio.sleep(2 ** attempt)

                except Exception as e:
                    logger.error(f"OpenAI embedding error (attempt {attempt + 1}): {e}")
                    if attempt == self.max_retries - 1:
                        embeddings.extend([[0.0] * self.vector_dimension for _ in batch])
                    else:
                        await asyncio.sleep(2 ** attempt)

        return embeddings

    async def health_check(self) -> bool:
        """Check OpenAI service health via the model-listing endpoint."""
        try:
            response = await self.client.get('/models')
            return response.status_code == 200
        except Exception as e:
            logger.error(f"OpenAI health check error: {e}")
            return False

    async def close(self):
        """Close the underlying HTTP client."""
        await self.client.aclose()


class EmbeddingService:
    """Embedding service with provider management.

    Owns a registry of providers built from ``config['providers']`` and
    routes :meth:`embed` calls to the currently-selected one, falling
    back to zero vectors when no provider is usable.
    """

    def __init__(self, config: Dict[str, Any]):
        self.config = config
        self.current_provider: Optional[EmbeddingProvider] = None
        self.providers: Dict[str, EmbeddingProvider] = {}
        self._initialize_providers()

    def _initialize_providers(self):
        """Initialize available providers from the 'providers' config section."""
        provider_configs = self.config.get('providers', {})

        # Ollama provider: per-provider config overrides service-level keys.
        if 'ollama' in provider_configs:
            ollama_config = {**self.config, **provider_configs['ollama']}
            self.providers['ollama'] = OllamaProvider(ollama_config)
            logger.info("Initialized Ollama provider")

        # OpenAI provider: requires an API key from the environment.
        if 'openai' in provider_configs:
            openai_config = {**self.config, **provider_configs['openai']}
            api_key = os.getenv(openai_config.get('api_key_env', 'OPENAI_API_KEY'))
            if api_key:
                openai_config['api_key'] = api_key
                self.providers['openai'] = OpenAIProvider(openai_config)
                logger.info("Initialized OpenAI provider")
            else:
                logger.warning("OpenAI API key not found, skipping OpenAI provider")

    async def set_provider(self, provider_name: str) -> bool:
        """Set current embedding provider; returns False if unavailable."""
        if provider_name not in self.providers:
            logger.error(f"Provider {provider_name} not available")
            return False

        # BUGFIX: previously the outgoing provider was closed here, but it
        # stayed registered in self.providers -- switching back to it later
        # would reuse a closed HTTP client. Providers now stay open until
        # close() shuts them all down.
        self.current_provider = self.providers[provider_name]
        logger.info(f"Switched to provider: {provider_name}")
        return True

    def _fallback_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Zero vectors used when no provider can serve the request.

        Default dimension is 768, matching EmbeddingProvider's default
        (previously this fallback inconsistently defaulted to 1024).
        """
        vector_dim = self.config.get('vector_dimension', 768)
        return [[0.0] * vector_dim for _ in texts]

    async def embed(self, texts: List[str], provider: Optional[str] = None) -> List[List[float]]:
        """Generate embeddings for texts, optionally via a named provider."""
        if provider:
            await self.set_provider(provider)

        if not self.current_provider:
            # Try to use default provider
            default_provider = self.config.get('provider', 'ollama')
            if not await self.set_provider(default_provider):
                logger.error(f"Default provider {default_provider} not available")
                return self._fallback_embeddings(texts)

        try:
            return await self.current_provider.embed(texts)
        except Exception as e:
            logger.error(f"Embedding generation failed: {e}")
            return self._fallback_embeddings(texts)

    async def embed_single(self, text: str, provider: Optional[str] = None) -> List[float]:
        """Generate embedding for a single text."""
        embeddings = await self.embed([text], provider)
        return embeddings[0] if embeddings else []

    async def health_check(self, provider: Optional[str] = None) -> Dict[str, bool]:
        """Check health of one named provider, or all registered providers."""
        if provider:
            if provider in self.providers:
                return {provider: await self.providers[provider].health_check()}
            # Unknown provider names report unhealthy rather than raising.
            return {provider: False}

        return {name: await prov.health_check() for name, prov in self.providers.items()}

    def get_available_providers(self) -> List[str]:
        """Get list of available provider names."""
        return list(self.providers.keys())

    def get_current_provider(self) -> Optional[str]:
        """Get current provider name, or None if none is selected."""
        if not self.current_provider:
            return None

        # Reverse lookup: find the registry name of the active provider.
        return next(
            (name for name, prov in self.providers.items() if prov == self.current_provider),
            None,
        )

    async def close(self):
        """Close all providers."""
        for provider in self.providers.values():
            await provider.close()
        logger.info("Closed all embedding providers")


# Global embedding service instance (lazily created singleton; created by
# get_embedding_service() and torn down by close_embedding_service()).
_embedding_service: Optional[EmbeddingService] = None


async def get_embedding_service() -> EmbeddingService:
    """Return the process-wide EmbeddingService, creating it on first use."""
    global _embedding_service
    if _embedding_service is None:
        # Imported lazily to avoid a module-level import cycle with config.
        from .config import get_embedding_config
        _embedding_service = EmbeddingService(get_embedding_config().dict())
    return _embedding_service


async def close_embedding_service():
    """Shut down and forget the global embedding service, if one exists."""
    global _embedding_service
    if _embedding_service is not None:
        await _embedding_service.close()
        _embedding_service = None

# Convenience functions
async def embed_texts(texts: List[str], provider: Optional[str] = None) -> List[List[float]]:
    """Embed *texts* through the global embedding service."""
    return await (await get_embedding_service()).embed(texts, provider)


async def embed_text(text: str, provider: Optional[str] = None) -> List[float]:
    """Embed a single *text* through the global embedding service."""
    return await (await get_embedding_service()).embed_single(text, provider)