"""Chat model implementation"""

from typing import Dict, List, Optional, Any
import asyncio

from modules.base.base_model import BaseModel
from modules.base.exceptions import ModelLoadError, InvalidInputError
from utils.validators import validate_text_input
from .memory import ConversationMemory

class ChatModel(BaseModel):
    """Chat model implementation using Hugging Face causal-LM models.

    Maintains a rolling :class:`ConversationMemory` and builds a
    "<context>\\nUser: ...\\nAssistant:" prompt for each turn.
    """

    def __init__(self, model_name: str, config: Optional[Dict] = None):
        super().__init__(model_name, config)
        # Rolling conversation history used to build the generation prompt;
        # capacity is configurable via "max_memory_entries".
        self.memory = ConversationMemory(
            max_entries=self.config.get("max_memory_entries", 100)
        )

    async def load(self) -> None:
        """Load the tokenizer and model weights.

        Raises:
            ModelLoadError: if the tokenizer or model cannot be loaded
                (original exception attached as ``__cause__``).
        """
        try:
            # Imported lazily so transformers is only required once a model
            # is actually loaded.
            from transformers import AutoModelForCausalLM, AutoTokenizer

            # from_pretrained blocks on disk/network I/O; run it off the
            # event loop so other coroutines keep making progress.
            self.tokenizer = await asyncio.to_thread(
                AutoTokenizer.from_pretrained, self.model_name
            )
            self.model = await asyncio.to_thread(
                AutoModelForCausalLM.from_pretrained, self.model_name
            )

            self.logger.info(f"Loaded chat model: {self.model_name}")
        except Exception as e:
            # Chain the original cause so the real failure is debuggable.
            raise ModelLoadError(f"Failed to load chat model: {str(e)}") from e

    async def validate_input(self, input_data: str) -> bool:
        """Validate chat input length against the configured bounds.

        Raises:
            InvalidInputError: re-raised after logging when validation fails.
        """
        try:
            return validate_text_input(
                input_data,
                min_length=1,
                max_length=self.config.get("max_input_length", 1000)
            )
        except InvalidInputError as e:
            self.logger.error(f"Input validation failed: {str(e)}")
            raise

    def _generate_sync(self, full_prompt: str) -> str:
        """Blocking tokenize/generate/decode step; run via asyncio.to_thread."""
        inputs = self.tokenizer(full_prompt, return_tensors="pt")
        prompt_len = inputs["input_ids"].shape[1]
        outputs = self.model.generate(
            inputs["input_ids"],
            max_length=self.config.get("max_length", 1024),
            temperature=self.config.get("temperature", 0.7),
            # temperature is ignored by transformers under greedy decoding;
            # sampling must be on for the configured temperature to matter.
            do_sample=self.config.get("do_sample", True),
            pad_token_id=self.tokenizer.eos_token_id
        )
        # Decode only the newly generated tokens: generate() returns the
        # prompt tokens followed by the continuation, and decoding the whole
        # sequence would echo the entire prompt back as the "response".
        new_tokens = outputs[0][prompt_len:]
        return self.tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

    async def predict(self, input_data: str) -> str:
        """Generate a chat response for *input_data*.

        Returns:
            The assistant's reply text (continuation only, prompt stripped).
        """
        await super().predict(input_data)  # Call parent for validation

        try:
            # Prepend conversation context so the model sees prior turns.
            context = self.memory.get_context()
            full_prompt = f"{context}\nUser: {input_data}\nAssistant:"

            # Generation is blocking (CPU/GPU-bound); keep the event loop free.
            response = await asyncio.to_thread(self._generate_sync, full_prompt)

            # Update conversation memory with this exchange.
            self.memory.add_exchange(input_data, response)

            return response

        except Exception as e:
            self.logger.error(f"Prediction failed: {str(e)}")
            raise
