"""
OpenAI embedder implementation.
"""

import asyncio
from typing import List, Optional, Dict, Any
from .base import IEmbedder, EmbeddingResponse, EmbedderInfo, DocumentType, TaskType

try:
    import openai
    from openai import OpenAI
except ImportError:
    raise ImportError("openai package is required for OpenAI embedder. Install with: pip install openai")

# Constants
# Token counts below are estimates based on the ~4-characters-per-token
# heuristic used in create_embeddings (len(text) // 4).
MAX_BATCH_TOKENS = 100000  # estimated-token budget for a single embeddings API call
MAX_ITEM_TOKENS = 8000  # per-text cap; longer texts are truncated before batching
MAX_BATCH_RETRIES = 3  # attempts per batch before giving up
INITIAL_RETRY_DELAY_MS = 1000  # base delay for exponential backoff (doubles each attempt)


class OpenAiEmbedder(IEmbedder):
    """OpenAI implementation of the embedder interface with batching and rate limiting.

    Texts are packed greedily into batches under an estimated-token budget
    (MAX_BATCH_TOKENS) and over-long texts are truncated (MAX_ITEM_TOKENS),
    so the returned embeddings are always aligned 1:1, in order, with the
    input texts. Transient failures are retried with exponential backoff.
    """

    def __init__(self, api_key: str, model_id: str = "text-embedding-3-small"):
        """Initialize the embedder.

        Args:
            api_key: OpenAI API key. Must be non-empty.
            model_id: Embedding model identifier.

        Raises:
            ValueError: If api_key is empty or None.
        """
        if not api_key:
            raise ValueError("OpenAI API key is required")

        self.client = OpenAI(api_key=api_key)
        self.model_id = model_id

    async def create_embeddings(
        self,
        texts: List[str],
        doc_type: DocumentType = "text",
        task: TaskType = "retrieval_document"
    ) -> EmbeddingResponse:
        """Creates embeddings for the given texts with batching and rate limiting.

        Token counts are estimated at ~4 characters per token. Texts whose
        estimate exceeds MAX_ITEM_TOKENS are truncated (never dropped), which
        guarantees the output embedding list is aligned 1:1 with `texts`.

        Args:
            texts: Texts to embed. May be empty (returns an empty response).
            doc_type: Document type hint (unused by the OpenAI API; kept for
                interface compatibility).
            task: Task type hint (unused by the OpenAI API; kept for
                interface compatibility).

        Returns:
            EmbeddingResponse with one embedding per input text and aggregated
            token usage across all batches.

        Raises:
            RuntimeError: If a batch still fails after MAX_BATCH_RETRIES attempts.
        """
        # Truncate over-long texts so every item fits within a batch. This
        # keeps outputs aligned with inputs instead of silently dropping items.
        processed_texts: List[str] = []
        for text in texts:
            if len(text) // 4 > MAX_ITEM_TOKENS:
                processed_texts.append(text[:MAX_ITEM_TOKENS * 4])
            else:
                processed_texts.append(text)

        all_embeddings: List[List[float]] = []
        usage: Dict[str, int] = {"prompt_tokens": 0, "total_tokens": 0}

        # Greedy sequential packing: consume texts in order, starting a new
        # batch when the estimated-token budget would be exceeded. O(n), and
        # order-preserving (the original pop-based approach was O(n^2)).
        index = 0
        total = len(processed_texts)
        while index < total:
            current_batch: List[str] = []
            current_batch_tokens = 0

            while index < total:
                item_tokens = len(processed_texts[index]) // 4
                # Always accept the first item of a batch so progress is
                # guaranteed even if a single item exceeds the budget.
                if current_batch and current_batch_tokens + item_tokens > MAX_BATCH_TOKENS:
                    break
                current_batch.append(processed_texts[index])
                current_batch_tokens += item_tokens
                index += 1

            batch_result = await self._embed_batch_with_retries(current_batch)
            all_embeddings.extend(batch_result.embeddings)
            if batch_result.usage:
                usage["prompt_tokens"] += batch_result.usage.get("prompt_tokens", 0)
                usage["total_tokens"] += batch_result.usage.get("total_tokens", 0)

        return EmbeddingResponse(embeddings=all_embeddings, usage=usage)

    async def _embed_batch_with_retries(self, batch_texts: List[str]) -> EmbeddingResponse:
        """Embed one batch, retrying with exponential backoff on failure.

        Rate-limit errors (HTTP 429) and other transient errors are retried up
        to MAX_BATCH_RETRIES times with delays of INITIAL_RETRY_DELAY_MS * 2^attempt.

        Args:
            batch_texts: Texts for a single API call.

        Returns:
            EmbeddingResponse for this batch, including token usage.

        Raises:
            RuntimeError: If all retry attempts are exhausted.
        """
        for attempt in range(MAX_BATCH_RETRIES):
            try:
                # The OpenAI client is synchronous; run it in the default
                # executor to avoid blocking the event loop.
                # (get_running_loop, not the deprecated get_event_loop.)
                response = await asyncio.get_running_loop().run_in_executor(
                    None,
                    lambda: self.client.embeddings.create(
                        input=batch_texts,
                        model=self.model_id
                    )
                )

                return EmbeddingResponse(
                    embeddings=[item.embedding for item in response.data],
                    usage={
                        "prompt_tokens": response.usage.prompt_tokens if response.usage else 0,
                        "total_tokens": response.usage.total_tokens if response.usage else 0,
                    }
                )

            except Exception as error:
                has_more_attempts = attempt < MAX_BATCH_RETRIES - 1

                # Rate-limit errors carry status_code == 429 on the OpenAI
                # exception types; back off and retry if attempts remain.
                if hasattr(error, 'status_code') and error.status_code == 429 and has_more_attempts:
                    delay_ms = INITIAL_RETRY_DELAY_MS * (2 ** attempt)
                    print(f"Rate limit hit, retrying in {delay_ms}ms (attempt {attempt + 1}/{MAX_BATCH_RETRIES})")
                    await asyncio.sleep(delay_ms / 1000.0)
                    continue

                if not has_more_attempts:
                    raise RuntimeError(f"Failed to create embeddings after {MAX_BATCH_RETRIES} attempts: {error}")

                # Non-429 transient error: log and retry with the same backoff.
                print(f"OpenAI embedder error (attempt {attempt + 1}/{MAX_BATCH_RETRIES}): {error}")
                await asyncio.sleep(INITIAL_RETRY_DELAY_MS * (2 ** attempt) / 1000.0)

        # Defensive: unreachable in practice (the loop either returns or
        # raises), kept so the function can never fall through to None.
        raise RuntimeError(f"Failed to create embeddings after {MAX_BATCH_RETRIES} attempts")

    async def validate_configuration(self) -> tuple[bool, Optional[str]]:
        """Validates the OpenAI embedder configuration with a minimal API call.

        Returns:
            (True, None) if a one-item embedding request succeeds with a
            non-empty response; otherwise (False, error description).
        """
        try:
            # Smallest possible request: one short input.
            response = await asyncio.get_running_loop().run_in_executor(
                None,
                lambda: self.client.embeddings.create(
                    input=["test"],
                    model=self.model_id
                )
            )

            if not response.data or len(response.data) == 0:
                return False, "Invalid response format from OpenAI API"

            return True, None

        except Exception as error:
            return False, f"OpenAI API validation failed: {error}"

    @property
    def embedder_info(self) -> EmbedderInfo:
        """Static metadata identifying this embedder implementation."""
        return EmbedderInfo(name="openai")