"""
模型对比数据模型
定义用于存储和管理多模型对比会话的数据结构
"""

from datetime import datetime
from typing import List, Dict, Any, Optional
from pydantic import BaseModel, Field, validator
from bson import ObjectId
from enum import Enum


class ModelInfo(BaseModel):
    """Descriptor for one model taking part in a comparison run."""
    name: str = Field(..., description="模型名称")  # model identifier, e.g. "gpt-4"
    version: str = Field(..., description="模型版本")  # version tag, e.g. "latest"
    parameters: Dict[str, Any] = Field(default_factory=dict, description="生成参数")  # free-form generation params (temperature, max_tokens, ...)
    
    class Config:
        # Pydantic v1: example payload surfaced in the generated OpenAPI schema.
        schema_extra = {
            "example": {
                "name": "gpt-4",
                "version": "latest",
                "parameters": {
                    "temperature": 0.7,
                    "max_tokens": 2000,
                    "top_p": 0.9
                }
            }
        }


class ComparisonStatus(str, Enum):
    """Lifecycle states of a comparison task.

    Inherits from str so members serialize and compare as plain strings.
    """
    PENDING = "pending"          # created, generation not yet started
    PROCESSING = "processing"    # generation in progress
    COMPLETED = "completed"      # finished successfully
    FAILED = "failed"            # finished with an error
    PARTIAL = "partial"  # partially successful (some models succeeded, some failed)


class ModelResult(BaseModel):
    """Generation output produced by a single model within a comparison session."""
    # NOTE(review): `status` is a plain str here while ComparisonSession uses the
    # ComparisonStatus enum — presumably the same value set; confirm before tightening the type.
    model: str = Field(..., description="模型名称")  # which model produced this result
    content: str = Field("", description="生成的内容")  # generated text; empty until completion
    tokens: int = Field(0, description="Token使用量")  # token usage count
    time: float = Field(0.0, description="生成时间（秒）")  # wall-clock generation time in seconds
    score: Dict[str, float] = Field(default_factory=dict, description="质量评分")  # quality metrics keyed by dimension (quality, relevance, ...)
    status: str = Field("pending", description="生成状态")  # generation status string
    error: Optional[str] = Field(None, description="错误信息")  # error message when generation failed
    
    class Config:
        # Pydantic v1: example payload surfaced in the generated OpenAPI schema.
        schema_extra = {
            "example": {
                "model": "gpt-4",
                "content": "这是生成的内容...",
                "tokens": 1500,
                "time": 2.5,
                "score": {
                    "quality": 0.85,
                    "relevance": 0.90,
                    "creativity": 0.75,
                    "fluency": 0.88
                },
                "status": "completed",
                "error": None
            }
        }


class ComparisonSession(BaseModel):
    """A multi-model comparison session persisted in MongoDB.

    Tracks the input prompt, the set of competing models, each model's
    result, the user's final selection/merged output, and free-form
    metadata (e.g. A/B-test data).
    """
    id: Optional[str] = Field(None, alias="_id")  # Mongo document id, stringified ObjectId; None before insert
    session_id: str = Field(..., description="对比会话ID")
    user_id: str = Field(..., description="用户ID")
    prompt: str = Field(..., description="输入提示词")
    system_prompt: Optional[str] = Field(None, description="系统提示词")
    models: List[ModelInfo] = Field(..., description="参与对比的模型列表")
    results: List[ModelResult] = Field(default_factory=list, description="各模型的结果")
    selected: Optional[str] = Field(None, description="最终选择的模型")
    merged_content: Optional[str] = Field(None, description="合并编辑后的内容")
    status: ComparisonStatus = Field(ComparisonStatus.PENDING, description="对比状态")
    created_at: datetime = Field(default_factory=datetime.utcnow, description="创建时间")
    updated_at: datetime = Field(default_factory=datetime.utcnow, description="更新时间")
    metadata: Dict[str, Any] = Field(default_factory=dict, description="A/B测试数据等元数据")
    
    class Config:
        # Allow constructing with either `id` or the `_id` alias.
        allow_population_by_field_name = True
        json_encoders = {
            ObjectId: str,
            datetime: lambda v: v.isoformat()
        }
        # Pydantic v1: example payload surfaced in the generated OpenAPI schema.
        schema_extra = {
            "example": {
                "session_id": "comp_20250121_001",
                "user_id": "user_123",
                "prompt": "请帮我写一篇关于人工智能的文章",
                "system_prompt": "你是一个专业的技术作家",
                "models": [
                    {
                        "name": "gpt-4",
                        "version": "latest",
                        "parameters": {"temperature": 0.7}
                    },
                    {
                        "name": "qwen-max",
                        "version": "latest",
                        "parameters": {"temperature": 0.6}
                    }
                ],
                "results": [],
                "status": "pending",
                "metadata": {
                    "source": "web_ui",
                    "experiment_id": "exp_001"
                }
            }
        }
    
    @validator('updated_at', pre=True, always=True)
    def set_updated_at(cls, v):
        """Set the timestamp when none is supplied, but keep an explicit value.

        The previous implementation unconditionally returned utcnow(), which
        clobbered the stored `updated_at` every time a document was rehydrated
        via from_mongo(). Callers that want to "touch" the session should set
        updated_at explicitly.
        """
        return v or datetime.utcnow()
    
    def to_mongo(self) -> dict:
        """Serialize to a MongoDB document (field aliases applied, id -> _id).

        Default-valued fields are kept: `exclude_unset=True` previously
        dropped status/created_at/updated_at/results/metadata on freshly
        created sessions, persisting incomplete documents.
        """
        data = self.dict(by_alias=True)
        # A session that has not been inserted yet has no ObjectId;
        # drop the key and let MongoDB assign one.
        if data.get("_id") is None:
            data.pop("_id", None)
        return data
    
    @classmethod
    def from_mongo(cls, data: dict) -> "ComparisonSession":
        """Build an instance from a MongoDB document without mutating the input."""
        doc = dict(data)  # shallow copy: don't rewrite the caller's _id in place
        if doc.get("_id"):
            doc["_id"] = str(doc["_id"])
        return cls(**doc)


class ComparisonRequest(BaseModel):
    """Request body for creating a new model-comparison session."""
    prompt: str = Field(..., description="用户提示词")  # user prompt to send to every model
    system_prompt: Optional[str] = Field(None, description="系统提示词")  # optional shared system prompt
    models: List[ModelInfo] = Field(..., description="要对比的模型")  # models (with per-model params) to compare
    metadata: Dict[str, Any] = Field(default_factory=dict, description="额外元数据")  # caller-supplied extra metadata
    
    class Config:
        # Pydantic v1: example payload surfaced in the generated OpenAPI schema.
        schema_extra = {
            "example": {
                "prompt": "写一个Python快速排序算法",
                "system_prompt": "你是一个Python专家",
                "models": [
                    {
                        "name": "gpt-4",
                        "version": "latest",
                        "parameters": {"temperature": 0.5}
                    },
                    {
                        "name": "gpt-3.5-turbo",
                        "version": "latest",
                        "parameters": {"temperature": 0.5}
                    }
                ],
                "metadata": {
                    "category": "code_generation"
                }
            }
        }


class ComparisonResponse(BaseModel):
    """Response body describing the state and results of a comparison session."""
    session_id: str = Field(..., description="会话ID")  # id of the session being reported
    status: ComparisonStatus = Field(..., description="状态")  # overall task status
    results: List[ModelResult] = Field(default_factory=list, description="结果列表")  # per-model results collected so far
    aggregation: Dict[str, Any] = Field(default_factory=dict, description="聚合统计")  # summary stats (counts, timings, best model, ...)
    
    class Config:
        # Pydantic v1: example payload surfaced in the generated OpenAPI schema.
        schema_extra = {
            "example": {
                "session_id": "comp_20250121_001",
                "status": "completed",
                "results": [
                    {
                        "model": "gpt-4",
                        "content": "def quicksort(arr)...",
                        "tokens": 150,
                        "time": 1.2,
                        "score": {"quality": 0.9},
                        "status": "completed"
                    }
                ],
                "aggregation": {
                    "total_models": 2,
                    "successful": 2,
                    "average_time": 1.5,
                    "best_score": "gpt-4"
                }
            }
        }


class ComparisonHistory(BaseModel):
    """One page of a user's comparison-session history."""
    user_id: str = Field(..., description="用户ID")  # owner of the listed sessions
    sessions: List[ComparisonSession] = Field(default_factory=list, description="会话列表")  # sessions on this page
    total_count: int = Field(0, description="总数")  # total matching sessions across all pages
    page: int = Field(1, description="当前页")  # 1-based page index
    page_size: int = Field(20, description="每页大小")  # sessions per page
    
    class Config:
        # Pydantic v1: example payload surfaced in the generated OpenAPI schema.
        schema_extra = {
            "example": {
                "user_id": "user_123",
                "sessions": [],
                "total_count": 50,
                "page": 1,
                "page_size": 20
            }
        }


class ModelSelectionEvent(BaseModel):
    """Event recording which model a user picked (used for A/B testing)."""
    session_id: str = Field(..., description="会话ID")  # comparison session the choice belongs to
    user_id: str = Field(..., description="用户ID")
    selected_model: str = Field(..., description="选择的模型")  # name of the chosen model
    # NOTE(review): utcnow() yields a naive datetime — presumably intended as UTC; confirm consumers agree.
    timestamp: datetime = Field(default_factory=datetime.utcnow, description="选择时间")
    reason: Optional[str] = Field(None, description="选择原因")  # free-text rationale, if given
    scores: Dict[str, float] = Field(default_factory=dict, description="各模型评分")  # per-model score at selection time
    user_feedback: Optional[Dict[str, Any]] = Field(None, description="用户反馈")  # structured feedback payload
    
    class Config:
        json_encoders = {
            datetime: lambda v: v.isoformat()
        }
        # Pydantic v1: example payload surfaced in the generated OpenAPI schema.
        schema_extra = {
            "example": {
                "session_id": "comp_20250121_001",
                "user_id": "user_123",
                "selected_model": "gpt-4",
                "reason": "更准确和详细",
                "scores": {
                    "gpt-4": 0.9,
                    "gpt-3.5-turbo": 0.75
                },
                "user_feedback": {
                    "satisfaction": 5,
                    "would_recommend": True
                }
            }
        }