"""Model metadata API endpoints."""

import logging
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional

from fastapi import APIRouter, Depends, HTTPException, Query, status
from motor.motor_asyncio import AsyncIOMotorDatabase
from pydantic import BaseModel, Field

from ..core.auth import get_current_user
from ..core.database import get_db

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/api/v1/models/metadata", tags=["model-metadata"])


class ModelPricing(BaseModel):
    """Pricing details for a single model.

    Prices are expressed per 1K tokens; ``free_tier`` is an optional
    monthly free-token allowance.
    """

    input_price_per_1k: Optional[float] = Field(
        default=None, description="Price per 1K input tokens"
    )
    output_price_per_1k: Optional[float] = Field(
        default=None, description="Price per 1K output tokens"
    )
    currency: str = "USD"
    billing_unit: str = "tokens"
    free_tier: Optional[int] = Field(
        default=None, description="Free tokens per month"
    )


class ModelCapabilities(BaseModel):
    """Feature flags and size limits describing what a model supports.

    Defaults describe a conservative baseline (4K context, streaming,
    English only); richer models override these per instance.
    """

    max_tokens: int = 4096
    supports_streaming: bool = True
    supports_function_calling: bool = False
    supports_vision: bool = False
    supports_json_mode: bool = False
    supports_system_prompt: bool = True
    context_window: int = 4096
    # default_factory so each instance gets its own list.
    languages_supported: List[str] = Field(default_factory=lambda: ["en"])
    fine_tunable: bool = False


class ModelMetadata(BaseModel):
    """Complete model metadata.

    Combines identity, lifecycle, capabilities, and optional pricing for
    one model; served by the endpoints in this router.
    """
    
    # Identity: internal id, provider-facing model name, and UI label.
    id: str
    name: str
    display_name: str
    provider: str
    provider_name: str
    description: Optional[str] = None
    version: Optional[str] = None
    release_date: Optional[datetime] = None
    deprecation_date: Optional[datetime] = None
    # Lifecycle state; the regex constrains it to the four known values.
    status: str = Field(default="active", pattern="^(active|deprecated|beta|preview)$")
    capabilities: ModelCapabilities
    pricing: Optional[ModelPricing] = None
    recommended_use_cases: List[str] = Field(default_factory=list)
    tags: List[str] = Field(default_factory=list)
    documentation_url: Optional[str] = None
    last_updated: datetime
    
    class Config:
        # Serialize datetimes as ISO-8601 strings; None stays None.
        json_encoders = {
            datetime: lambda v: v.isoformat() if v else None
        }


class ModelStatistics(BaseModel):
    """Model usage statistics.

    Aggregated per-user usage for one model over the inclusive window
    [period_start, period_end]; defaults represent "no usage recorded".
    """
    
    # NOTE(review): the "model_id" field name falls in pydantic v2's
    # protected "model_" namespace and may emit a warning — confirm.
    model_id: str
    total_requests: int = 0
    total_tokens: int = 0
    total_cost: float = 0.0
    average_response_time: float = 0.0
    # Percentage in [0, 100]; 100.0 when there were no requests at all.
    success_rate: float = 100.0
    last_used: Optional[datetime] = None
    period_start: datetime
    period_end: datetime
    
    class Config:
        # Serialize datetimes as ISO-8601 strings; None stays None.
        json_encoders = {
            datetime: lambda v: v.isoformat() if v else None
        }


# Predefined metadata for common models. These act as built-in fallbacks:
# the endpoints below serve them directly and let documents in the
# `model_metadata` collection override entries with the same `id`.
# NOTE(review): `last_updated` is captured once at import time using the
# naive-UTC `datetime.utcnow()` (deprecated since Python 3.12) — confirm
# naive timestamps are intended here.
DEFAULT_MODEL_METADATA = {
    # OpenAI flagship model.
    "gpt-4": ModelMetadata(
        id="gpt-4",
        name="gpt-4",
        display_name="GPT-4",
        provider="openai",
        provider_name="OpenAI",
        description="Most capable GPT-4 model, better at complex tasks",
        version="gpt-4-0613",
        capabilities=ModelCapabilities(
            max_tokens=8192,
            supports_streaming=True,
            supports_function_calling=True,
            supports_vision=False,
            supports_json_mode=True,
            context_window=8192,
            languages_supported=["en", "zh", "es", "fr", "de", "ja", "ko", "ru", "ar", "hi"]
        ),
        pricing=ModelPricing(
            input_price_per_1k=0.03,
            output_price_per_1k=0.06
        ),
        recommended_use_cases=["Complex reasoning", "Creative writing", "Code generation", "Analysis"],
        tags=["advanced", "reasoning", "creative"],
        documentation_url="https://platform.openai.com/docs/models/gpt-4",
        last_updated=datetime.utcnow()
    ),
    # OpenAI fast/cheap tier.
    "gpt-3.5-turbo": ModelMetadata(
        id="gpt-3.5-turbo",
        name="gpt-3.5-turbo",
        display_name="GPT-3.5 Turbo",
        provider="openai",
        provider_name="OpenAI",
        description="Fast and efficient model for most tasks",
        version="gpt-3.5-turbo-0613",
        capabilities=ModelCapabilities(
            max_tokens=4096,
            supports_streaming=True,
            supports_function_calling=True,
            supports_vision=False,
            supports_json_mode=True,
            context_window=16384,
            languages_supported=["en", "zh", "es", "fr", "de", "ja", "ko", "ru"]
        ),
        pricing=ModelPricing(
            input_price_per_1k=0.0005,
            output_price_per_1k=0.0015
        ),
        recommended_use_cases=["Chatbots", "Quick tasks", "Drafting", "Simple coding"],
        tags=["fast", "efficient", "general-purpose"],
        documentation_url="https://platform.openai.com/docs/models/gpt-3-5",
        last_updated=datetime.utcnow()
    ),
    # Anthropic flagship; note the id omits the date suffix that `name` carries.
    "claude-3-opus": ModelMetadata(
        id="claude-3-opus",
        name="claude-3-opus-20240229",
        display_name="Claude 3 Opus",
        provider="anthropic",
        provider_name="Anthropic",
        description="Most powerful Claude model for complex tasks",
        version="20240229",
        capabilities=ModelCapabilities(
            max_tokens=4096,
            supports_streaming=True,
            supports_function_calling=False,
            supports_vision=True,
            context_window=200000,
            languages_supported=["en", "zh", "es", "fr", "de", "ja", "ko", "ru", "ar"]
        ),
        pricing=ModelPricing(
            input_price_per_1k=0.015,
            output_price_per_1k=0.075
        ),
        recommended_use_cases=["Research", "Analysis", "Complex reasoning", "Long documents"],
        tags=["advanced", "long-context", "multimodal"],
        documentation_url="https://docs.anthropic.com/claude/docs",
        last_updated=datetime.utcnow()
    ),
    # DeepSeek bilingual model.
    "deepseek-chat": ModelMetadata(
        id="deepseek-chat",
        name="deepseek-chat",
        display_name="DeepSeek Chat",
        provider="deepseek",
        provider_name="DeepSeek",
        description="Efficient Chinese-English bilingual model",
        version="v2",
        capabilities=ModelCapabilities(
            max_tokens=4096,
            supports_streaming=True,
            supports_function_calling=True,
            context_window=32768,
            languages_supported=["en", "zh"]
        ),
        pricing=ModelPricing(
            input_price_per_1k=0.0001,
            output_price_per_1k=0.0002
        ),
        recommended_use_cases=["Chinese content", "Bilingual tasks", "Cost-effective generation"],
        tags=["bilingual", "cost-effective", "chinese"],
        documentation_url="https://platform.deepseek.com/docs",
        last_updated=datetime.utcnow()
    ),
    # Alibaba Cloud flagship model.
    "qwen-max": ModelMetadata(
        id="qwen-max",
        name="qwen-max",
        display_name="Qwen Max",
        provider="alibaba",
        provider_name="Alibaba Cloud",
        description="Alibaba's most capable language model",
        version="v1",
        capabilities=ModelCapabilities(
            max_tokens=8192,
            supports_streaming=True,
            supports_function_calling=True,
            context_window=30720,
            languages_supported=["en", "zh"]
        ),
        pricing=ModelPricing(
            input_price_per_1k=0.004,
            output_price_per_1k=0.012
        ),
        recommended_use_cases=["Chinese content", "E-commerce", "Business analysis"],
        tags=["chinese", "commercial", "alibaba"],
        documentation_url="https://help.aliyun.com/document_detail/2399482.html",
        last_updated=datetime.utcnow()
    )
}


@router.get("", response_model=List[ModelMetadata])
async def get_all_model_metadata(
    provider: Optional[str] = None,
    # Renamed from `status`: the old name shadowed the imported
    # `fastapi.status` module, so the except-branch below crashed with
    # AttributeError instead of returning a clean 500. The alias keeps the
    # public query-parameter name unchanged.
    status_filter: Optional[str] = Query(None, alias="status"),
    current_user: dict = Depends(get_current_user),
    db: AsyncIOMotorDatabase = Depends(get_db)
) -> List[ModelMetadata]:
    """
    Get metadata for all available models.

    Documents in the ``model_metadata`` collection override the built-in
    defaults with the same ``id``; filters apply to the merged result.

    Args:
        provider: Filter by provider
        status_filter: Filter by status (active, deprecated, beta, preview);
            exposed on the wire as the ``status`` query parameter.

    Returns:
        List of model metadata

    Raises:
        HTTPException: 500 on any database/validation failure.
    """
    try:
        # Narrow the DB query up front; the defaults still need the
        # post-merge filtering below.
        query: Dict[str, Any] = {}
        if provider:
            query["provider"] = provider
        if status_filter:
            query["status"] = status_filter

        custom_metadata = await db.model_metadata.find(query).to_list(None)

        # Merge by id: custom records replace defaults in place, new ids
        # are appended (dict preserves insertion order).
        merged = {m.id: m for m in DEFAULT_MODEL_METADATA.values()}
        for custom in custom_metadata:
            merged[custom["id"]] = ModelMetadata(**custom)

        all_metadata = list(merged.values())

        # Apply filters to the merged list (defaults were never DB-filtered).
        if provider:
            all_metadata = [m for m in all_metadata if m.provider == provider]
        if status_filter:
            all_metadata = [m for m in all_metadata if m.status == status_filter]

        return all_metadata

    except Exception as e:
        logger.error(f"Failed to get model metadata: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to retrieve model metadata"
        )


@router.get("/{model_id}", response_model=ModelMetadata)
async def get_model_metadata(
    model_id: str,
    current_user: dict = Depends(get_current_user),
    db: AsyncIOMotorDatabase = Depends(get_db)
) -> ModelMetadata:
    """
    Get metadata for a single model.

    A custom record in the ``model_metadata`` collection takes precedence
    over the built-in defaults; a 404 is raised when neither exists.

    Args:
        model_id: Model ID

    Returns:
        Model metadata
    """
    try:
        # Database overrides win over built-in defaults.
        override = await db.model_metadata.find_one({"id": model_id})
        if override is not None:
            return ModelMetadata(**override)

        default = DEFAULT_MODEL_METADATA.get(model_id)
        if default is not None:
            return default

        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Metadata not found for model: {model_id}"
        )

    except HTTPException:
        # Let deliberate HTTP errors (the 404 above) pass through untouched.
        raise
    except Exception as e:
        logger.error(f"Failed to get metadata for model {model_id}: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to retrieve model metadata"
        )


@router.get("/{model_id}/pricing", response_model=ModelPricing)
async def get_model_pricing(
    model_id: str,
    current_user: dict = Depends(get_current_user),
    db: AsyncIOMotorDatabase = Depends(get_db)
) -> ModelPricing:
    """
    Get pricing information for a model.

    Delegates lookup to ``get_model_metadata`` and returns only the
    pricing section; 404s when the model has no pricing attached.

    Args:
        model_id: Model ID

    Returns:
        Model pricing information
    """
    try:
        metadata = await get_model_metadata(model_id, current_user, db)
        pricing = metadata.pricing

        if pricing is None:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"Pricing information not available for model: {model_id}"
            )

        return pricing

    except HTTPException:
        # Re-raise deliberate HTTP errors (404 from lookup or the one above).
        raise
    except Exception as e:
        logger.error(f"Failed to get pricing for model {model_id}: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to retrieve pricing information"
        )


@router.get("/{model_id}/statistics", response_model=ModelStatistics)
async def get_model_statistics(
    model_id: str,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    current_user: dict = Depends(get_current_user),
    db: AsyncIOMotorDatabase = Depends(get_db)
) -> ModelStatistics:
    """
    Get usage statistics for a model.

    Aggregates the current user's entries in ``model_usage`` over the
    requested window (defaulting to the last 30 days).

    Args:
        model_id: Model ID
        start_date: Start date for statistics period
        end_date: End date for statistics period

    Returns:
        Model usage statistics (zeroed when there is no usage data)

    Raises:
        HTTPException: 500 on any database failure.
    """
    try:
        # Default to last 30 days if no dates provided.
        # (Fixed: `timedelta` was previously referenced without being
        # imported, raising NameError whenever start_date was omitted.)
        if not end_date:
            end_date = datetime.utcnow()
        if not start_date:
            start_date = end_date - timedelta(days=30)

        # Aggregate the user's usage records for this model in the window.
        pipeline = [
            {
                "$match": {
                    "user_id": current_user["sub"],
                    "model_id": model_id,
                    "timestamp": {"$gte": start_date, "$lte": end_date}
                }
            },
            {
                "$group": {
                    "_id": "$model_id",
                    "total_requests": {"$sum": 1},
                    "total_tokens": {"$sum": "$tokens_used"},
                    "total_cost": {"$sum": "$cost"},
                    "avg_response_time": {"$avg": "$response_time"},
                    "success_count": {
                        "$sum": {"$cond": [{"$eq": ["$status", "success"]}, 1, 0]}
                    },
                    "last_used": {"$max": "$timestamp"}
                }
            }
        ]

        result = await db.model_usage.aggregate(pipeline).to_list(1)

        if result:
            data = result[0]
            total_requests = data["total_requests"]
            success_rate = (
                data["success_count"] / total_requests * 100
                if total_requests > 0 else 100.0
            )

            return ModelStatistics(
                model_id=model_id,
                total_requests=total_requests,
                total_tokens=data.get("total_tokens", 0),
                total_cost=data.get("total_cost", 0.0),
                # `$avg` yields None when no document has `response_time`;
                # coerce to 0.0 so pydantic's float validation doesn't fail.
                average_response_time=data.get("avg_response_time") or 0.0,
                success_rate=success_rate,
                last_used=data.get("last_used"),
                period_start=start_date,
                period_end=end_date
            )

        # No usage in the window: return zeroed statistics.
        return ModelStatistics(
            model_id=model_id,
            period_start=start_date,
            period_end=end_date
        )

    except Exception as e:
        logger.error(f"Failed to get statistics for model {model_id}: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to retrieve model statistics"
        )


@router.post("/update-notifications", response_model=Dict[str, Any])
async def subscribe_to_updates(
    model_ids: List[str],
    current_user: dict = Depends(get_current_user),
    db: AsyncIOMotorDatabase = Depends(get_db)
) -> Dict[str, Any]:
    """
    Subscribe to update notifications for specific models.

    Replaces the caller's subscription list in one upserted document
    (one document per user, keyed by ``user_id``).

    Args:
        model_ids: List of model IDs to subscribe to

    Returns:
        Subscription confirmation
    """
    try:
        subscription = {
            "model_ids": model_ids,
            "updated_at": datetime.utcnow(),
        }

        # Upsert: create the per-user document on first subscription,
        # overwrite the model list on subsequent calls.
        await db.model_subscriptions.update_one(
            {"user_id": current_user["sub"]},
            {"$set": subscription},
            upsert=True
        )

        return {
            "subscribed": True,
            "model_ids": model_ids,
            "message": f"Subscribed to updates for {len(model_ids)} models"
        }

    except Exception as e:
        logger.error(f"Failed to subscribe to model updates: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to subscribe to model updates"
        )