"""
LLM Interface for Cross-Temporal Dialogue Generation.

This module provides a unified interface for multiple LLM backends with
personality conditioning, context management, and response quality evaluation.
"""

import numpy as np
import logging
import asyncio
import time
from typing import Dict, List, Optional, Tuple, Any, Union, Callable
from dataclasses import dataclass, field
from enum import Enum
from abc import ABC, abstractmethod
import json
import aiohttp

logger = logging.getLogger(__name__)


class LLMProvider(Enum):
    """Supported LLM providers.

    NOTE(review): only OPENAI, ANTHROPIC and LOCAL_TRANSFORMERS have backend
    implementations in this module; OLLAMA and TOGETHER are declared but not
    handled by ``LLMInterface._create_backend``.
    """
    OPENAI = "openai"
    ANTHROPIC = "anthropic"
    LOCAL_TRANSFORMERS = "local_transformers"
    OLLAMA = "ollama"
    TOGETHER = "together"


class ResponseQuality(Enum):
    """Response quality levels, ordered best-to-worst.

    Mapped from the numeric score in
    ``LLMInterface.evaluate_response_quality`` (>=0.9 excellent, >=0.75 good,
    >=0.6 acceptable, >=0.4 poor, else rejected).
    """
    EXCELLENT = "excellent"
    GOOD = "good"
    ACCEPTABLE = "acceptable"
    POOR = "poor"
    REJECTED = "rejected"


@dataclass
class LLMConfig:
    """Configuration for LLM backend."""
    provider: LLMProvider  # which backend implementation to instantiate
    model_name: str  # provider-specific model identifier
    api_key: Optional[str] = None  # required for hosted providers (OpenAI/Anthropic)
    api_endpoint: Optional[str] = None  # NOTE(review): not read by any built-in backend
    max_tokens: int = 500
    temperature: float = 0.7
    top_p: float = 0.9
    frequency_penalty: float = 0.0  # used by the OpenAI backend only
    presence_penalty: float = 0.0  # used by the OpenAI backend only
    timeout_seconds: int = 30  # per-request HTTP timeout
    retry_attempts: int = 3  # NOTE(review): not currently consulted by any backend
    fallback_configs: List['LLMConfig'] = field(default_factory=list)  # NOTE(review): unused; fallback order comes from LLMInterface configs


@dataclass
class GenerationParams:
    """Parameters for text generation."""
    prompt: str  # current user prompt
    context: Optional[Dict[str, Any]] = None  # keys read: historical_period, location, cultural_context
    personality_conditioning: Optional[Dict[str, Any]] = None  # keys read: big_five_traits, cultural_dimensions, language_style, formality_level
    conversation_history: List[Dict[str, str]] = field(default_factory=list)  # entries with "role"/"content" keys
    temperature: float = 0.7
    max_length: int = 500  # max tokens to generate
    stop_sequences: List[str] = field(default_factory=list)
    response_format: str = "text"  # "text", "json", "structured"


@dataclass
class LLMResponse:
    """Response from LLM generation."""
    text: str  # generated text (prompt stripped where applicable)
    quality_score: float  # heuristic 0..1 score assigned by the backend
    confidence: float  # heuristic 0..1 confidence assigned by the backend
    provider_used: LLMProvider  # backend that produced this response
    model_used: str
    generation_time_ms: float  # wall-clock time for the request
    token_count: int  # exact for OpenAI (usage.total_tokens); whitespace-split estimate elsewhere
    finish_reason: str  # provider finish/stop reason
    raw_response: Optional[Dict] = None  # raw provider payload for debugging
    metadata: Dict[str, Any] = field(default_factory=dict)


class LLMBackend(ABC):
    """Abstract base class for LLM backends.

    Concrete backends implement async generation plus an availability probe;
    ``LLMInterface`` uses the probe when building its fallback order.
    """
    
    @abstractmethod
    async def generate(self, params: GenerationParams) -> LLMResponse:
        """Generate text using the LLM backend.

        Args:
            params: Prompt, history and conditioning for this request.

        Returns:
            LLMResponse describing the generated text and metadata.
        """
        pass
    
    @abstractmethod
    def is_available(self) -> bool:
        """Check if the backend is available (credentials set / model loaded)."""
        pass


class OpenAIBackend(LLMBackend):
    """OpenAI chat-completions API backend."""

    def __init__(self, config: LLMConfig):
        self.config = config
        self.session = None  # aiohttp.ClientSession, created lazily

    async def _ensure_session(self):
        """Lazily create the shared aiohttp session on first use."""
        if self.session is None:
            self.session = aiohttp.ClientSession()

    async def generate(self, params: GenerationParams) -> LLMResponse:
        """Generate text using the OpenAI chat completions API.

        Args:
            params: Prompt, conversation history and conditioning.

        Returns:
            LLMResponse with the generated text and request metadata.

        Raises:
            Exception: On non-200 responses or transport failures.
        """
        await self._ensure_session()
        start_time = time.time()

        # Prepare request
        messages = self._prepare_messages(params)

        payload = {
            "model": self.config.model_name,
            "messages": messages,
            "max_tokens": params.max_length,
            "temperature": params.temperature,
            "top_p": self.config.top_p,
            "frequency_penalty": self.config.frequency_penalty,
            "presence_penalty": self.config.presence_penalty,
        }
        # Omit "stop" entirely when no sequences are given rather than
        # sending an explicit null value.
        if params.stop_sequences:
            payload["stop"] = params.stop_sequences

        headers = {
            "Authorization": f"Bearer {self.config.api_key}",
            "Content-Type": "application/json"
        }

        try:
            async with self.session.post(
                "https://api.openai.com/v1/chat/completions",
                json=payload,
                headers=headers,
                # aiohttp expects a ClientTimeout object here; passing a bare
                # number is deprecated in aiohttp 3.x.
                timeout=aiohttp.ClientTimeout(total=self.config.timeout_seconds)
            ) as response:
                result = await response.json()

                if response.status == 200:
                    generated_text = result["choices"][0]["message"]["content"]
                    finish_reason = result["choices"][0]["finish_reason"]
                    token_count = result["usage"]["total_tokens"]

                    generation_time = (time.time() - start_time) * 1000

                    return LLMResponse(
                        text=generated_text,
                        quality_score=0.8,  # Placeholder - would implement quality assessment
                        confidence=0.85,
                        provider_used=LLMProvider.OPENAI,
                        model_used=self.config.model_name,
                        generation_time_ms=generation_time,
                        token_count=token_count,
                        finish_reason=finish_reason,
                        raw_response=result
                    )
                else:
                    raise Exception(f"OpenAI API error: {result.get('error', 'Unknown error')}")

        except Exception as e:
            logger.error(f"OpenAI generation failed: {e}")
            raise

    def _prepare_messages(self, params: GenerationParams) -> List[Dict[str, str]]:
        """Prepare messages for the OpenAI chat format.

        Order: system message (context + personality), prior turns with
        role "user"/"assistant" (other roles are dropped), then the current
        user prompt.
        """
        messages = []

        # Add system message with personality conditioning
        system_content = self._create_system_message(params)
        messages.append({"role": "system", "content": system_content})

        # Add conversation history
        for entry in params.conversation_history:
            if entry.get("role") in ["user", "assistant"]:
                messages.append({
                    "role": entry["role"],
                    "content": entry["content"]
                })

        # Add current prompt
        messages.append({"role": "user", "content": params.prompt})

        return messages

    def _create_system_message(self, params: GenerationParams) -> str:
        """Create the system message encoding context and personality.

        Produces an empty string when neither context nor conditioning is
        supplied.
        """
        system_parts = []

        # Add context information
        if params.context:
            historical_period = params.context.get('historical_period', '')
            location = params.context.get('location', '')
            cultural_context = params.context.get('cultural_context', '')

            system_parts.append(
                f"You are engaging in historical dialogue set in {historical_period} "
                f"in {location}. The cultural context is {cultural_context}."
            )

        # Add personality conditioning
        if params.personality_conditioning:
            traits = params.personality_conditioning.get('big_five_traits', {})
            cultural_dims = params.personality_conditioning.get('cultural_dimensions', {})

            # Convert traits to personality description
            personality_desc = self._traits_to_description(traits, cultural_dims)
            system_parts.append(f"Embody this personality: {personality_desc}")

            # Add language style requirements
            language_style = params.personality_conditioning.get('language_style', 'period_appropriate')
            formality_level = params.personality_conditioning.get('formality_level', 0.7)

            system_parts.append(
                f"Use {language_style} language with formality level {formality_level:.1f}/1.0. "
                f"Maintain historical accuracy and avoid anachronisms."
            )

        return " ".join(system_parts)

    def _traits_to_description(self, traits: Dict, cultural_dims: Dict) -> str:
        """Convert personality traits to a natural-language description.

        Only values outside the (0.3, 0.7) band contribute text; mid-range
        values are treated as unremarkable. Missing keys default to 0.5.
        """
        descriptions = []

        # Big Five traits
        if traits.get('extraversion', 0.5) > 0.7:
            descriptions.append("outgoing and sociable")
        elif traits.get('extraversion', 0.5) < 0.3:
            descriptions.append("reserved and introspective")

        if traits.get('agreeableness', 0.5) > 0.7:
            descriptions.append("cooperative and trusting")
        elif traits.get('agreeableness', 0.5) < 0.3:
            descriptions.append("competitive and skeptical")

        if traits.get('conscientiousness', 0.5) > 0.7:
            descriptions.append("organized and disciplined")
        elif traits.get('conscientiousness', 0.5) < 0.3:
            descriptions.append("flexible and spontaneous")

        # Cultural dimensions
        if cultural_dims.get('power_distance', 0.5) > 0.7:
            descriptions.append("respectful of hierarchy")
        elif cultural_dims.get('power_distance', 0.5) < 0.3:
            descriptions.append("egalitarian in outlook")

        return ", ".join(descriptions) if descriptions else "balanced personality"

    def is_available(self) -> bool:
        """Check if the OpenAI backend is available (API key configured)."""
        return self.config.api_key is not None


class AnthropicBackend(LLMBackend):
    """Anthropic Claude API backend (legacy text-completions endpoint)."""

    def __init__(self, config: LLMConfig):
        self.config = config
        self.session = None  # aiohttp.ClientSession, created lazily

    async def _ensure_session(self):
        """Lazily create the shared aiohttp session on first use."""
        if self.session is None:
            self.session = aiohttp.ClientSession()

    async def generate(self, params: GenerationParams) -> LLMResponse:
        """Generate text using the Anthropic Claude completions API.

        Args:
            params: Prompt, conversation history and conditioning.

        Returns:
            LLMResponse with the completion text and request metadata.

        Raises:
            Exception: On non-200 responses or transport failures.
        """
        await self._ensure_session()
        start_time = time.time()

        # Prepare prompt with personality conditioning
        full_prompt = self._prepare_anthropic_prompt(params)

        payload = {
            "model": self.config.model_name,
            "prompt": full_prompt,
            "max_tokens_to_sample": params.max_length,
            "temperature": params.temperature,
            "top_p": self.config.top_p,
            "stop_sequences": params.stop_sequences
        }

        headers = {
            "x-api-key": self.config.api_key,
            "Content-Type": "application/json",
            "anthropic-version": "2023-06-01"
        }

        try:
            async with self.session.post(
                "https://api.anthropic.com/v1/complete",
                json=payload,
                headers=headers,
                # aiohttp expects a ClientTimeout object here; passing a bare
                # number is deprecated in aiohttp 3.x.
                timeout=aiohttp.ClientTimeout(total=self.config.timeout_seconds)
            ) as response:
                result = await response.json()

                if response.status == 200:
                    generated_text = result["completion"]
                    stop_reason = result.get("stop_reason", "max_tokens")

                    generation_time = (time.time() - start_time) * 1000

                    return LLMResponse(
                        text=generated_text.strip(),
                        quality_score=0.85,  # Placeholder heuristic score
                        confidence=0.8,
                        provider_used=LLMProvider.ANTHROPIC,
                        model_used=self.config.model_name,
                        generation_time_ms=generation_time,
                        token_count=len(generated_text.split()),  # Rough estimate
                        finish_reason=stop_reason,
                        raw_response=result
                    )
                else:
                    error_msg = result.get('error', {}).get('message', 'Unknown error')
                    raise Exception(f"Anthropic API error: {error_msg}")

        except Exception as e:
            logger.error(f"Anthropic generation failed: {e}")
            raise

    def _prepare_anthropic_prompt(self, params: GenerationParams) -> str:
        """Prepare the prompt in the Human/Assistant turn format.

        Bug fix: the previous version emitted literal backslash-n character
        pairs ("\\n\\nHuman:") instead of real newlines. The completions API
        requires turns delimited by actual newline pairs, so every request
        would have been rejected by prompt validation.
        """
        prompt_parts = []

        # Add context and personality conditioning as an initial Human turn
        if params.context or params.personality_conditioning:
            system_msg = self._create_system_message(params)
            prompt_parts.append(f"\n\nHuman: {system_msg}")

        # Add conversation history
        for entry in params.conversation_history:
            role = "Human" if entry.get("role") == "user" else "Assistant"
            prompt_parts.append(f"\n\n{role}: {entry['content']}")

        # Add current prompt and the trailing Assistant cue
        prompt_parts.append(f"\n\nHuman: {params.prompt}")
        prompt_parts.append("\n\nAssistant:")

        return "".join(prompt_parts)

    def _create_system_message(self, params: GenerationParams) -> str:
        """Create system message (delegates to the OpenAI implementation)."""
        openai_backend = OpenAIBackend(self.config)
        return openai_backend._create_system_message(params)

    def is_available(self) -> bool:
        """Check if Anthropic backend is available (API key configured)."""
        return self.config.api_key is not None


class LocalTransformersBackend(LLMBackend):
    """Local text-generation backend using Hugging Face transformers."""

    def __init__(self, config: LLMConfig):
        self.config = config
        self.pipeline = None
        self._initialize_pipeline()

    def _initialize_pipeline(self):
        """Initialize the local transformers pipeline (best effort).

        Leaves ``self.pipeline`` as None on any failure so that
        ``is_available`` reports the backend as unusable.
        """
        try:
            from transformers import pipeline
            self.pipeline = pipeline(
                "text-generation",
                model=self.config.model_name,
                device_map="auto" if self._has_gpu() else None
            )
            logger.info(f"Initialized local transformers with model: {self.config.model_name}")
        except ImportError:
            logger.warning("transformers library not available")
        except Exception as e:
            logger.error(f"Failed to initialize local model: {e}")

    def _has_gpu(self) -> bool:
        """Check if a CUDA GPU is available."""
        try:
            import torch
            return torch.cuda.is_available()
        except ImportError:
            return False

    async def generate(self, params: GenerationParams) -> LLMResponse:
        """Generate text with the local pipeline in a worker thread.

        Args:
            params: Prompt, conversation history and conditioning.

        Returns:
            LLMResponse with the generated text (echoed prompt stripped).

        Raises:
            Exception: If the pipeline is not initialized or generation fails.
        """
        if not self.pipeline:
            raise Exception("Local transformers pipeline not initialized")

        start_time = time.time()

        # Prepare full prompt
        full_prompt = self._prepare_local_prompt(params)

        try:
            # Run generation in a thread pool to avoid blocking the event
            # loop. get_running_loop() is the supported call inside a
            # coroutine; get_event_loop() is deprecated in this context.
            loop = asyncio.get_running_loop()
            result = await loop.run_in_executor(
                None,
                lambda: self.pipeline(
                    full_prompt,
                    max_new_tokens=params.max_length,
                    temperature=params.temperature,
                    top_p=self.config.top_p,
                    do_sample=True,
                    pad_token_id=self.pipeline.tokenizer.eos_token_id
                )
            )

            generated_text = result[0]["generated_text"]
            # Remove the echoed prompt from the pipeline output
            if generated_text.startswith(full_prompt):
                generated_text = generated_text[len(full_prompt):].strip()

            generation_time = (time.time() - start_time) * 1000

            return LLMResponse(
                text=generated_text,
                quality_score=0.7,  # Local models typically lower quality
                confidence=0.75,
                provider_used=LLMProvider.LOCAL_TRANSFORMERS,
                model_used=self.config.model_name,
                generation_time_ms=generation_time,
                token_count=len(generated_text.split()),
                finish_reason="max_tokens",
                raw_response=result[0]
            )

        except Exception as e:
            logger.error(f"Local generation failed: {e}")
            raise

    def _prepare_local_prompt(self, params: GenerationParams) -> str:
        """Build a plain-text prompt for the local model.

        Bug fix: the previous version emitted literal backslash-n character
        pairs instead of real newlines, so the model saw "\\n" text between
        turns rather than line breaks.
        """
        # Simple concatenation approach
        prompt_parts = []

        if params.personality_conditioning or params.context:
            system_msg = self._create_system_message(params)
            prompt_parts.append(f"Instructions: {system_msg}\n\n")

        # Add conversation history
        for entry in params.conversation_history:
            role = entry.get("role", "user").title()
            prompt_parts.append(f"{role}: {entry['content']}\n")

        # Add current prompt
        prompt_parts.append(f"User: {params.prompt}\nAssistant:")

        return "".join(prompt_parts)

    def _create_system_message(self, params: GenerationParams) -> str:
        """Create system message (delegates to the OpenAI implementation)."""
        openai_backend = OpenAIBackend(self.config)
        return openai_backend._create_system_message(params)

    def is_available(self) -> bool:
        """Check if the local transformers backend is available."""
        return self.pipeline is not None


class LLMInterface:
    """
    Unified interface for multiple LLM backends with personality conditioning.

    Features:
    - Multiple LLM backend support (OpenAI, Anthropic, local models)
    - Personality vector conditioning for language generation
    - Context management for multi-turn conversations
    - Response quality evaluation and filtering
    - Automatic fallback between backends
    """

    def __init__(self, configs: List["LLMConfig"]):
        """
        Initialize LLM interface with backend configurations.

        Args:
            configs: List of LLM configurations in priority order
        """
        self.configs = configs
        self.backends: Dict["LLMProvider", "LLMBackend"] = {}
        self.fallback_order: List["LLMProvider"] = []

        # Initialize backends
        self._initialize_backends()

        # Quality evaluation settings
        self.quality_threshold = 0.6
        self.quality_filters: List[Callable[["LLMResponse"], bool]] = []

        # Performance tracking
        self.generation_stats = self._fresh_stats()

        logger.info(f"LLMInterface initialized with {len(self.backends)} backends")

    @staticmethod
    def _fresh_stats() -> Dict[str, Any]:
        """Build a zeroed statistics dict.

        Single source of truth shared by __init__ and reset_stats (the dict
        literal was previously duplicated in both places).
        """
        return {
            'total_requests': 0,
            'successful_generations': 0,
            'failed_generations': 0,
            'fallback_uses': 0,
            'average_generation_time': 0.0,
            'backend_usage': {provider.value: 0 for provider in LLMProvider}
        }

    def _initialize_backends(self):
        """Initialize all configured backends, skipping unavailable ones."""
        for config in self.configs:
            try:
                backend = self._create_backend(config)
                if backend and backend.is_available():
                    self.backends[config.provider] = backend
                    self.fallback_order.append(config.provider)
                    logger.info(f"Initialized {config.provider.value} backend")
                else:
                    logger.warning(f"Backend {config.provider.value} not available")
            except Exception as e:
                logger.error(f"Failed to initialize {config.provider.value} backend: {e}")

    def _create_backend(self, config: "LLMConfig") -> Optional["LLMBackend"]:
        """Create a backend instance based on the provider type.

        Returns None for providers with no implementation (OLLAMA, TOGETHER).
        """
        if config.provider == LLMProvider.OPENAI:
            return OpenAIBackend(config)
        elif config.provider == LLMProvider.ANTHROPIC:
            return AnthropicBackend(config)
        elif config.provider == LLMProvider.LOCAL_TRANSFORMERS:
            return LocalTransformersBackend(config)
        else:
            logger.warning(f"Unsupported provider: {config.provider}")
            return None

    async def generate_response(
        self,
        prompt: str,
        context: Optional[Dict[str, Any]] = None,
        personality_conditioning: Optional[Dict[str, Any]] = None,
        conversation_history: Optional[List[Dict[str, str]]] = None,
        temperature: float = 0.7,
        max_length: int = 500
    ) -> str:
        """
        Generate response using the best available backend.

        Args:
            prompt: Input prompt for generation
            context: Dialogue context information
            personality_conditioning: Personality parameters for conditioning
            conversation_history: Previous conversation turns (None = empty)
            temperature: Generation temperature
            max_length: Maximum response length

        Returns:
            Generated response text
        """
        if conversation_history is None:
            conversation_history = []

        params = GenerationParams(
            prompt=prompt,
            context=context,
            personality_conditioning=personality_conditioning,
            conversation_history=conversation_history,
            temperature=temperature,
            max_length=max_length
        )

        response = await self.generate_with_fallback(params)
        return response.text

    async def generate_with_fallback(self, params: "GenerationParams") -> "LLMResponse":
        """
        Generate response with automatic fallback between backends.

        Args:
            params: Generation parameters

        Returns:
            LLM response from the first backend whose output passes the
            quality check

        Raises:
            Exception: If all backends fail
        """
        self.generation_stats['total_requests'] += 1

        last_exception = None

        for provider in self.fallback_order:
            if provider not in self.backends:
                continue

            try:
                backend = self.backends[provider]
                response = await backend.generate(params)

                # Quality gate: a low-quality response triggers the next backend
                if self._passes_quality_check(response):
                    # Update stats
                    self.generation_stats['successful_generations'] += 1
                    self.generation_stats['backend_usage'][provider.value] += 1
                    self._update_generation_time(response.generation_time_ms)

                    return response
                else:
                    logger.info(f"Response from {provider.value} failed quality check")
                    continue

            except Exception as e:
                logger.warning(f"Generation failed with {provider.value}: {e}")
                last_exception = e

                # Track fallback usage if not first backend
                if provider != self.fallback_order[0]:
                    self.generation_stats['fallback_uses'] += 1

                continue

        # All backends failed
        self.generation_stats['failed_generations'] += 1
        raise Exception(f"All LLM backends failed. Last error: {last_exception}")

    def _passes_quality_check(self, response: "LLMResponse") -> bool:
        """Check if a response meets quality standards.

        Requires quality_score >= threshold, at least 10 non-whitespace-
        delimited characters, and approval from every registered filter.
        """
        # Basic quality checks
        if response.quality_score < self.quality_threshold:
            return False

        if len(response.text.strip()) < 10:
            return False

        # Run custom quality filters
        for filter_func in self.quality_filters:
            if not filter_func(response):
                return False

        return True

    def _update_generation_time(self, generation_time: float):
        """Fold one generation time into the running average.

        Must be called after successful_generations has been incremented for
        this request (the denominator assumes it counts this sample).
        """
        total_successful = self.generation_stats['successful_generations']
        current_avg = self.generation_stats['average_generation_time']

        new_avg = ((current_avg * (total_successful - 1) + generation_time) /
                   total_successful)
        self.generation_stats['average_generation_time'] = new_avg

    async def evaluate_response_quality(
        self,
        response: str,
        context: Optional[Dict[str, Any]] = None,
        expected_criteria: Optional[List[str]] = None
    ) -> Tuple["ResponseQuality", float, Dict[str, Any]]:
        """
        Evaluate the quality of a generated response.

        Weighted score: 20% length, 30% coherence, 30% historical
        appropriateness (default 0.8 without context), 20% personality
        consistency.

        Args:
            response: Generated response text
            context: Generation context
            expected_criteria: Expected quality criteria (currently unused)

        Returns:
            Tuple of (quality_level, score, detailed_metrics)
        """
        metrics = {}
        score = 0.0

        # Length check - expect at least ~50 chars for full credit
        length_score = min(1.0, len(response.strip()) / 50.0)
        metrics['length_score'] = length_score
        score += length_score * 0.2

        # Coherence check (simple heuristics)
        coherence_score = self._assess_coherence(response)
        metrics['coherence_score'] = coherence_score
        score += coherence_score * 0.3

        # Historical appropriateness (if context provided)
        if context and context.get('historical_period'):
            historical_score = self._assess_historical_appropriateness(response, context)
            metrics['historical_score'] = historical_score
            score += historical_score * 0.3
        else:
            score += 0.8 * 0.3  # Default score if no historical context

        # Personality consistency (if personality conditioning was used)
        personality_score = 0.8  # Default
        if context and context.get('personality_conditioning'):
            personality_score = self._assess_personality_consistency(response, context)
        metrics['personality_score'] = personality_score
        score += personality_score * 0.2

        # Determine quality level from the aggregate score
        if score >= 0.9:
            quality = ResponseQuality.EXCELLENT
        elif score >= 0.75:
            quality = ResponseQuality.GOOD
        elif score >= 0.6:
            quality = ResponseQuality.ACCEPTABLE
        elif score >= 0.4:
            quality = ResponseQuality.POOR
        else:
            quality = ResponseQuality.REJECTED

        metrics['overall_score'] = score

        return quality, score, metrics

    def _assess_coherence(self, text: str) -> float:
        """Assess text coherence using simple heuristics.

        Averages a sentence-length score (~10 words/sentence = full credit)
        with a vocabulary-diversity score (unique/total words).
        """
        if not text.strip():
            return 0.0

        sentences = [s.strip() for s in text.split('.') if s.strip()]
        if len(sentences) == 0:
            return 0.5

        # Check for reasonable sentence length
        avg_sentence_length = sum(len(s.split()) for s in sentences) / len(sentences)
        length_score = min(1.0, avg_sentence_length / 10.0)

        # Check for repetition (simple)
        words = text.lower().split()
        unique_words = set(words)
        repetition_score = len(unique_words) / len(words) if words else 0.0

        return (length_score + repetition_score) / 2.0

    def _assess_historical_appropriateness(self, text: str, context: Dict) -> float:
        """Assess historical appropriateness via an anachronism word list.

        Each matched modern term costs 0.2 off a 0.8 base score (substring
        match, case-insensitive).
        """
        modern_terms = [
            'computer', 'internet', 'smartphone', 'television',
            'okay', 'cool', 'awesome', 'dude'
        ]

        text_lower = text.lower()
        anachronism_count = sum(1 for term in modern_terms if term in text_lower)

        # Penalty for anachronisms
        anachronism_penalty = anachronism_count * 0.2

        # Basic appropriateness score
        base_score = 0.8

        return max(0.0, base_score - anachronism_penalty)

    def _assess_personality_consistency(self, text: str, context: Dict) -> float:
        """Assess personality consistency in generated text.

        Placeholder: always returns 0.75 until a real assessment is built.
        """
        return 0.75

    def add_quality_filter(self, filter_func: Callable[["LLMResponse"], bool]):
        """Add a custom quality filter (returns False to reject a response)."""
        self.quality_filters.append(filter_func)
        logger.info("Added custom quality filter")

    def set_quality_threshold(self, threshold: float):
        """Set minimum quality threshold for responses (clamped to [0, 1])."""
        self.quality_threshold = max(0.0, min(1.0, threshold))
        logger.info(f"Quality threshold set to {self.quality_threshold}")

    def get_available_backends(self) -> List["LLMProvider"]:
        """Get list of available backend providers."""
        return list(self.backends.keys())

    def get_performance_metrics(self) -> Dict[str, Any]:
        """Get a detached snapshot of performance metrics.

        Nested dict values are copied as well: the previous shallow copy
        aliased the internal 'backend_usage' dict, so callers could corrupt
        live statistics (and later generations mutated returned snapshots).
        """
        total_requests = self.generation_stats['total_requests']

        metrics = {
            key: (value.copy() if isinstance(value, dict) else value)
            for key, value in self.generation_stats.items()
        }

        if total_requests > 0:
            metrics['success_rate'] = (
                self.generation_stats['successful_generations'] / total_requests
            )
            metrics['failure_rate'] = (
                self.generation_stats['failed_generations'] / total_requests
            )
            metrics['fallback_rate'] = (
                self.generation_stats['fallback_uses'] / total_requests
            )

        return metrics

    def reset_stats(self):
        """Reset performance statistics to zero."""
        self.generation_stats = self._fresh_stats()
        logger.info("LLM interface statistics reset")

    async def cleanup(self):
        """Close any aiohttp sessions held by the backends."""
        for backend in self.backends.values():
            if hasattr(backend, 'session') and backend.session:
                await backend.session.close()

        logger.info("LLM interface cleanup completed")