import asyncio
from typing import Optional, Any, Dict
from ..models.schemas import ChatCompletionRequest, ChatCompletionResponse, CompletionRequest, CompletionResponse


class TransformersBackend:
    """Hugging Face Transformers backend implementation.

    Serves OpenAI-style chat and text completions from models loaded with
    ``AutoModelForCausalLM``. The optional ``transformers``/``torch``
    dependencies are imported lazily so the application can start without
    them; ``self.supported`` records whether they are available.

    NOTE(review): generation runs synchronously inside async methods and
    will block the event loop; consider ``run_in_executor`` — confirm with
    the server's concurrency model.
    """

    # Hard cap on generated tokens per request, to bound generation time.
    MAX_NEW_TOKENS = 512

    def __init__(self):
        # False until the optional transformers dependency imports cleanly.
        self.supported = False
        try:
            from transformers import AutoTokenizer, AutoModelForCausalLM
            self.AutoTokenizer = AutoTokenizer
            self.AutoModelForCausalLM = AutoModelForCausalLM
            self.supported = True
        except ImportError:
            pass

    def _require_supported(self) -> None:
        """Raise ImportError if the transformers package is unavailable."""
        if not self.supported:
            raise ImportError("transformers is not installed. Please install it with 'pip install transformers'")

    async def load_model(self, model_name: str, model_path: Optional[str] = None):
        """Load a tokenizer/model pair using Transformers.

        Args:
            model_name: Logical name used to identify the model in responses.
            model_path: Local path or hub id; defaults to ``model_name``.

        Returns:
            A dict bundle with keys ``model_name``, ``model_path``, ``model``,
            ``tokenizer`` and ``backend`` ("transformers").

        Raises:
            ImportError: If transformers is not installed.
        """
        self._require_supported()

        model_path = model_path or model_name
        tokenizer = self.AutoTokenizer.from_pretrained(model_path)

        # Many causal LMs ship without a pad token; reuse EOS so generate()
        # always has a valid pad_token_id.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        model = self.AutoModelForCausalLM.from_pretrained(
            model_path,
            torch_dtype="auto",   # let transformers pick the checkpoint dtype
            device_map="auto",    # place weights on accelerator(s) when available
        )

        return {
            "model_name": model_name,
            "model_path": model_path,
            "model": model,
            "tokenizer": tokenizer,
            "backend": "transformers",
        }

    async def unload_model(self, model: Any):
        """Release a model bundle produced by :meth:`load_model`.

        Drops the model/tokenizer references, runs a GC pass and asks CUDA
        to return freed memory to the driver.
        """
        # pop() instead of del: a partially built bundle must not raise KeyError.
        model.pop("model", None)
        model.pop("tokenizer", None)

        import gc
        import torch
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

    async def generate_chat(self, model: Any, request: ChatCompletionRequest) -> ChatCompletionResponse:
        """Generate a chat completion for an OpenAI-style request.

        Multimodal-looking models (by name) are routed through a text-only
        fallback path; everything else is flattened to a "role: content"
        prompt and generated directly.

        Raises:
            ImportError: If transformers is not installed.
        """
        self._require_supported()

        import uuid
        import torch
        from ..models.schemas import ChatMessage, Choice, Usage
        from ..utils import format_multimodal_content

        tokenizer = model["tokenizer"]

        # Heuristic: route vision-capable model families by model name.
        is_multimodal = any(
            keyword in model["model_name"].lower()
            for keyword in ("qwen", "minicpm", "vl", "vision", "llava")
        )

        if is_multimodal:
            response_text = await self._generate_multimodal_response(model, request)
            # Token-accurate accounting is model-specific for multimodal
            # inputs; fall back to rough approximations.
            prompt_tokens = len(str(request.messages))
            completion_tokens = len(response_text.split())
        else:
            # Flatten the chat history into a plain "role: content" prompt.
            # (Fixed: the old code emitted a literal backslash-n, not a newline.)
            conversation = "".join(
                f"{msg.role.value}: {format_multimodal_content(msg.content)}\n"
                for msg in request.messages
            ) + "assistant:"

            inputs = tokenizer(conversation, return_tensors="pt", truncation=True)
            if torch.cuda.is_available():
                inputs = {k: v.cuda() for k, v in inputs.items()}

            response_text = self._run_generation(model["model"], tokenizer, inputs, request)

            # Real token counts (the old code counted characters for the prompt).
            prompt_tokens = inputs["input_ids"].shape[1]
            completion_tokens = len(tokenizer.encode(response_text))

        choice = Choice(
            index=0,
            message=ChatMessage(role="assistant", content=response_text),
            finish_reason="stop",
        )

        return ChatCompletionResponse(
            id=f"chatcmpl-{uuid.uuid4().hex}",
            model=model["model_name"],
            choices=[choice],
            usage=Usage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens,
            ),
        )

    async def _generate_multimodal_response(self, model: Any, request: ChatCompletionRequest) -> str:
        """Text-only fallback for multimodal models (Qwen-VL, MiniCPM-V, ...).

        Images are NOT actually encoded: ``image_url`` parts are replaced by
        an " [IMAGE] " placeholder. TODO: wire in each model's own processor
        for true image support.
        """
        import torch

        tokenizer = model["tokenizer"]

        # Flatten messages, expanding OpenAI-style typed content parts.
        parts = []
        for msg in request.messages:
            content = msg.content
            if isinstance(content, list):
                text_content = ""
                for item in content:
                    if isinstance(item, dict):
                        if item.get("type") == "text":
                            text_content += item.get("text", "")
                        elif item.get("type") == "image_url":
                            # Placeholder only — image bytes are discarded.
                            text_content += " [IMAGE] "
                    else:
                        text_content += str(item)
                parts.append(f"{msg.role.value}: {text_content}\n")
            else:
                parts.append(f"{msg.role.value}: {content}\n")
        conversation = "".join(parts) + "assistant:"

        inputs = tokenizer(conversation, return_tensors="pt", truncation=True)
        if torch.cuda.is_available():
            inputs = {k: v.cuda() for k, v in inputs.items()}

        return self._run_generation(model["model"], tokenizer, inputs, request)

    def _run_generation(self, model_obj: Any, tokenizer: Any, inputs: Dict[str, Any], request: Any) -> str:
        """Run ``model.generate`` on pre-tokenized inputs and decode only the
        newly generated tokens.

        Shared by the chat, multimodal and completion paths so sampling
        parameters and the output cap stay consistent.
        """
        import torch

        # request.max_tokens may be None on OpenAI-style requests; the old
        # min(None, 512) would raise TypeError. Cap at MAX_NEW_TOKENS either way.
        requested = request.max_tokens if request.max_tokens is not None else self.MAX_NEW_TOKENS
        max_new_tokens = min(requested, self.MAX_NEW_TOKENS)

        # load_model guarantees a pad token, but fall back to EOS defensively.
        pad_token_id = tokenizer.pad_token_id
        if pad_token_id is None:
            pad_token_id = tokenizer.eos_token_id

        with torch.no_grad():
            outputs = model_obj.generate(
                **inputs,
                max_new_tokens=max_new_tokens,
                temperature=request.temperature,
                top_p=request.top_p,
                do_sample=True,
                pad_token_id=pad_token_id,
            )

        # Strip the prompt tokens so only newly generated text is decoded.
        new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
        return tokenizer.decode(new_tokens, skip_special_tokens=True)

    async def generate_completion(self, model: Any, request: CompletionRequest) -> CompletionResponse:
        """Generate a text completion.

        Only the first prompt is used when ``request.prompt`` is a list.

        Raises:
            ImportError: If transformers is not installed.
        """
        self._require_supported()

        import uuid
        import torch
        # Fixed: Usage was referenced below but never imported here (NameError).
        from ..models.schemas import Usage

        tokenizer = model["tokenizer"]

        prompt = request.prompt if isinstance(request.prompt, str) else request.prompt[0]

        inputs = tokenizer(prompt, return_tensors="pt", truncation=True)
        if torch.cuda.is_available():
            inputs = {k: v.cuda() for k, v in inputs.items()}

        response_text = self._run_generation(model["model"], tokenizer, inputs, request)

        prompt_tokens = inputs["input_ids"].shape[1]
        completion_tokens = len(tokenizer.encode(response_text))

        choice = {
            "text": response_text,
            "index": 0,
            "logprobs": None,
            "finish_reason": "length",
        }

        return CompletionResponse(
            id=f"cmpl-{uuid.uuid4().hex}",
            model=model["model_name"],
            choices=[choice],
            usage=Usage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens,
            ),
        )