"""
OpenAI compatible API router implementation
Supports standard ChatCompletion endpoints with streaming
"""
import json
import time
import uuid
import asyncio
from typing import Dict, Any, List, Optional, AsyncGenerator
from fastapi import APIRouter, HTTPException, Request, Response
from fastapi.responses import JSONResponse, StreamingResponse
from sse_starlette.sse import EventSourceResponse
import logging

from ..models.openai import (
    OpenAIRequest, OpenAIResponse, OpenAIErrorResponse, OpenAIError,
    Message, Choice, StreamChoice, StreamResponse, Usage,
    MessageRole, ModelInfo, ModelsResponse
)

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/v1", tags=["OpenAI Compatible API"])


class ModelRegistry:
    """Registry mapping model IDs to their metadata and completion handlers.

    Each registered model carries an ``owned_by`` string, a ``created``
    timestamp, and a ``handler`` callable (sync or async) that takes an
    OpenAIRequest and returns the completion text.
    """

    # (model_id, owner) pairs registered on construction; all are backed by
    # the mock handler until real model integrations are plugged in.
    _DEFAULT_MODELS = (
        ("gpt-3.5-turbo", "openai"),
        ("gpt-4", "openai"),
        ("claude-3-sonnet", "anthropic"),
    )

    def __init__(self):
        # model_id -> {"owned_by": str, "created": int, "handler": callable}
        self._models: Dict[str, Dict[str, Any]] = {}
        self._register_default_models()

    def _register_default_models(self):
        """Register the built-in demo models from the declarative table."""
        for model_id, owner in self._DEFAULT_MODELS:
            self.register_model(
                model_id=model_id,
                model_info={
                    "owned_by": owner,
                    "created": int(time.time()),
                    "handler": self._mock_chat_completion,
                },
            )

    def register_model(self, model_id: str, model_info: Dict[str, Any]):
        """Register (or overwrite) a model with its metadata and handler."""
        self._models[model_id] = model_info

    def get_models_list(self) -> "List[ModelInfo]":
        """Get the list of available models as OpenAI-style ModelInfo objects."""
        return [
            ModelInfo(
                id=model_id,
                object="model",
                created=info["created"],
                owned_by=info["owned_by"]
            )
            for model_id, info in self._models.items()
        ]

    def get_model_handler(self, model_id: str):
        """Return the handler callable for *model_id*, or None if unknown."""
        return self._models.get(model_id, {}).get("handler")

    def model_exists(self, model_id: str) -> bool:
        """Check whether *model_id* has been registered."""
        return model_id in self._models

    async def _mock_chat_completion(self, request: "OpenAIRequest") -> str:
        """Mock chat completion implementation.

        Placeholder — replace with an actual AI model integration. Echoes the
        last user message back, or apologizes when no content was supplied.
        """
        last_message = request.messages[-1] if request.messages else None
        if not last_message or not last_message.content:
            return "I'm sorry, I didn't receive any message to respond to."

        # Simple echo-based response for demo
        return f"This is a mock response to: '{last_message.content}' from model {request.model}"


# Global model registry — module-level singleton shared by the route handlers
# below; also exported via __all__ so other modules can register models on it.
model_registry = ModelRegistry()


def create_error_response(error_type: str, message: str, param: Optional[str] = None, code: Optional[str] = None) -> OpenAIErrorResponse:
    """Build an OpenAI-style error envelope from the given error fields."""
    error = OpenAIError(
        message=message,
        type=error_type,
        param=param,
        code=code,
    )
    return OpenAIErrorResponse(error=error)


@router.get("/models", response_model=ModelsResponse)
async def list_models():
    """
    List available models
    Compatible with OpenAI's /v1/models endpoint
    """
    return ModelsResponse(object="list", data=model_registry.get_models_list())


@router.post("/chat/completions")
async def create_chat_completion(request: OpenAIRequest) -> Response:
    """
    Create a chat completion
    Compatible with OpenAI's /v1/chat/completions endpoint
    """

    def _error_json(status: int, message: str, error_type: str, param: Optional[str] = None) -> JSONResponse:
        # Wrap an OpenAI-style error body in a JSONResponse with the given status.
        body = create_error_response(error_type=error_type, message=message, param=param)
        return JSONResponse(status_code=status, content=body.model_dump())

    try:
        # Reject unknown models up front.
        if not model_registry.model_exists(request.model):
            return _error_json(
                404,
                f"The model '{request.model}' does not exist",
                "invalid_request_error",
                param="model",
            )

        # Reject empty conversations.
        if not request.messages:
            return _error_json(
                400,
                "The 'messages' field is required and cannot be empty",
                "invalid_request_error",
                param="messages",
            )

        # Dispatch to the streaming or non-streaming implementation.
        if request.stream:
            return await create_streaming_completion(request)
        return await create_non_streaming_completion(request)

    except ValueError as e:
        logger.error(f"Validation error: {str(e)}")
        return _error_json(400, str(e), "invalid_request_error")

    except Exception as e:
        logger.error(f"Internal error in chat completion: {str(e)}")
        return _error_json(500, "An internal error occurred", "internal_error")


async def create_non_streaming_completion(request: OpenAIRequest) -> JSONResponse:
    """Build the complete (non-streamed) chat completion JSON response."""
    completion_id = f"chatcmpl-{uuid.uuid4().hex[:29]}"
    created_timestamp = int(time.time())

    handler = model_registry.get_model_handler(request.model)
    if not handler:
        # Surfaced by the route handler as a 400 invalid_request_error.
        raise ValueError(f"No handler found for model: {request.model}")

    # Invoke the handler, awaiting when it is a coroutine function; fall back
    # to a canned apology if generation fails so the endpoint still responds.
    try:
        if asyncio.iscoroutinefunction(handler):
            completion_text = await handler(request)
        else:
            completion_text = handler(request)
    except Exception as e:
        logger.error(f"Error generating completion: {str(e)}")
        completion_text = "I apologize, but I encountered an error while processing your request."

    # Mock token accounting: whitespace-delimited word counts stand in for tokens.
    prompt_tokens = sum(len((msg.content or "").split()) for msg in request.messages)
    completion_tokens = len(completion_text.split())

    response = OpenAIResponse(
        id=completion_id,
        object="chat.completion",
        created=created_timestamp,
        model=request.model,
        choices=[
            Choice(
                index=0,
                message=Message(role=MessageRole.ASSISTANT, content=completion_text),
                finish_reason="stop",
            )
        ],
        usage=Usage(
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            total_tokens=prompt_tokens + completion_tokens,
        ),
    )
    return JSONResponse(content=response.model_dump())


async def create_streaming_completion(request: OpenAIRequest) -> StreamingResponse:
    """Create a streaming chat completion response as Server-Sent Events.

    Emits OpenAI-style ``chat.completion.chunk`` objects one word at a time,
    then a final chunk carrying ``finish_reason`` (and usage, when
    ``stream_options.include_usage`` is set), then the ``[DONE]`` sentinel.
    Errors are reported in-band as a JSON ``error`` chunk rather than an
    HTTP error status, since headers are already sent once streaming begins.
    """

    async def generate_stream():
        completion_id = f"chatcmpl-{uuid.uuid4().hex[:29]}"
        created_timestamp = int(time.time())

        # Get model handler and generate response
        handler = model_registry.get_model_handler(request.model)
        if not handler:
            # Send error in streaming format
            error_chunk = {
                "error": {
                    "message": f"No handler found for model: {request.model}",
                    "type": "invalid_request_error"
                }
            }
            yield f"data: {json.dumps(error_chunk)}\n\n"
            yield "data: [DONE]\n\n"
            return

        try:
            # Generate the full completion up front (a real integration would
            # stream tokens from the model instead).
            if asyncio.iscoroutinefunction(handler):
                completion_text = await handler(request)
            else:
                completion_text = handler(request)

            # Split text into word chunks to simulate incremental delivery.
            words = completion_text.split()

            for i, word in enumerate(words):
                # Only the first delta carries the assistant role, per the
                # OpenAI streaming format.
                if i == 0:
                    delta = Message(role=MessageRole.ASSISTANT, content=word + " ")
                else:
                    delta = Message(content=word + " ")

                stream_choice = StreamChoice(
                    index=0,
                    delta=delta,
                    finish_reason=None
                )

                stream_response = StreamResponse(
                    id=completion_id,
                    object="chat.completion.chunk",
                    created=created_timestamp,
                    model=request.model,
                    choices=[stream_choice]
                )

                yield f"data: {stream_response.model_dump_json()}\n\n"

                # Small delay to simulate streaming
                await asyncio.sleep(0.1)

            # Send final chunk with empty delta and the finish reason.
            final_choice = StreamChoice(
                index=0,
                delta=Message(content=""),
                finish_reason="stop"
            )

            final_response = StreamResponse(
                id=completion_id,
                object="chat.completion.chunk",
                created=created_timestamp,
                model=request.model,
                choices=[final_choice]
            )

            # Include usage in the final chunk if the client requested it.
            if request.stream_options and request.stream_options.include_usage:
                prompt_tokens = sum(len((msg.content or "").split()) for msg in request.messages)
                completion_tokens = len(completion_text.split())
                final_response.usage = Usage(
                    prompt_tokens=prompt_tokens,
                    completion_tokens=completion_tokens,
                    total_tokens=prompt_tokens + completion_tokens
                )

            yield f"data: {final_response.model_dump_json()}\n\n"

        except Exception as e:
            logger.error(f"Error in streaming completion: {str(e)}")
            error_chunk = {
                "error": {
                    "message": "An error occurred while generating the response",
                    "type": "internal_error"
                }
            }
            yield f"data: {json.dumps(error_chunk)}\n\n"

        # End stream
        yield "data: [DONE]\n\n"

    # FIX: declare the SSE media type directly instead of media_type="text/plain"
    # with a contradictory Content-Type header override — the response body is
    # Server-Sent Events, so advertise text/event-stream consistently.
    return StreamingResponse(
        generate_stream(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive"
        }
    )


@router.get("/models/{model_id}")
async def get_model(model_id: str):
    """
    Get specific model information
    Compatible with OpenAI's /v1/models/{model} endpoint
    """
    # Linear scan is fine here: the registry holds only a handful of models.
    for model in model_registry.get_models_list():
        if model.id == model_id:
            return JSONResponse(content=model.model_dump())

    error_response = create_error_response(
        error_type="not_found_error",
        message=f"The model '{model_id}' does not exist"
    )
    return JSONResponse(
        status_code=404,
        content=error_response.model_dump()
    )


# Export the model registry for use by other modules
__all__ = ["router", "model_registry", "ModelRegistry"]