from pydantic import BaseModel, Field, UUID4
from typing import List, Dict, Optional, Union
from datetime import datetime

class Message(BaseModel):
    """A single chat message in OpenAI-style format.

    Used inside ChatRequest.messages to carry a conversation turn.
    """
    role: str  # message author role (e.g. "user"/"assistant"/"system" — presumably; not validated here)
    content: str  # message text
    name: Optional[str] = None  # optional sender/function name
    function_call: Optional[Dict] = None  # optional function-call payload; schema not constrained here

class ChatRequest(BaseModel):
    """Chat request model.

    Accepts either a single ``message`` string or a ``messages`` list;
    both are Optional, and nothing here enforces that at least one is
    provided — presumably the endpoint handler validates that.
    """
    session_id: UUID4  # conversation session identifier
    message: Optional[str] = None  # kept for backward compatibility (single-string form)
    messages: Optional[List[Message]] = None  # supports array (multi-turn) format
    use_rag: bool = True  # whether to retrieve RAG context
    use_tools: bool = True  # whether tool calling is allowed
    model: Optional[str] = None  # override model name; None = server default
    temperature: Optional[float] = None  # sampling temperature; None = server default
    top_p: Optional[float] = None  # nucleus-sampling cutoff; None = server default
    max_tokens: Optional[int] = None  # generation length cap; None = server default

class ChatResponse(BaseModel):
    """Chat response model."""
    session_id: UUID4  # echoes the request's session
    message_id: UUID4  # identifier of the stored assistant message
    response: str  # generated assistant text
    rag_context: Optional[List[Dict]] = None  # retrieved documents used as context, if RAG ran
    tool_calls: Optional[List[Dict]] = None  # tool invocations made during generation, if any
    model_info: Optional[Dict] = None  # metadata about the model that answered
    performance: Optional[Dict] = None  # timing/token metrics for this request

class MessageHistoryRequest(BaseModel):
    """Request model for fetching message history."""
    session_id: UUID4  # session whose history is requested
    limit: int = 20  # maximum number of messages to return

class MessageHistoryResponse(BaseModel):
    """Response model for fetching message history."""
    session_id: UUID4  # session the history belongs to
    messages: List[Dict]  # message records; shape defined by the storage layer, not constrained here
    total: int  # total message count (may exceed len(messages) when limited)

class LearnRequest(BaseModel):
    """Learning (feedback) request model."""
    user_query: str  # the original user question
    assistant_response: str  # the answer the assistant gave
    corrected_response: Optional[str] = None  # human-corrected answer, if provided
    feedback: Optional[str] = None  # free-form feedback text
    is_positive: Optional[bool] = None  # thumbs up/down; None = no rating given

class LearnResponse(BaseModel):
    """Learning (feedback) response model."""
    success: bool  # whether the learning sample was accepted
    message: str  # human-readable status message
    data: Optional[Dict] = None  # optional extra payload

class DocumentUploadRequest(BaseModel):
    """Document upload request model.

    NOTE(review): takes a server-side ``file_path`` rather than file
    content — presumably the file is already on the host; confirm callers.
    """
    file_path: str  # path of the document to ingest
    metadata: Optional[Dict] = None  # arbitrary metadata stored with the document

class DocumentUploadResponse(BaseModel):
    """Document upload response model."""
    success: bool  # whether ingestion succeeded
    message: str  # human-readable status message
    document_id: Optional[int] = None  # database id of the stored document, when successful
    chunks_count: Optional[int] = None  # number of chunks the document was split into

class ToolCallRequest(BaseModel):
    """Tool invocation request model."""
    tool_name: str  # registered name of the tool to invoke
    parameters: Dict  # tool arguments; schema depends on the tool

class ToolCallResponse(BaseModel):
    """Tool invocation response model."""
    success: bool  # whether the tool ran without error
    data: Optional[Dict] = None  # tool output on success
    error: Optional[str] = None  # error description on failure

class ModelInfoResponse(BaseModel):
    """Model information response model."""
    name: str  # model name/identifier
    is_loaded: bool  # whether the model is currently loaded in memory
    load_time: float  # load duration; presumably seconds — confirm against producer
    quantized: bool  # whether the loaded weights are quantized
    quantization_type: Optional[str] = None  # quantization scheme name, when quantized
    quantization_bits: Optional[int] = None  # bit width, when quantized
    lora_enabled: bool  # whether a LoRA adapter is active
    lora_adapter_name: Optional[str] = None  # active adapter name, when LoRA is enabled
    device: Optional[str] = None  # device the model runs on (e.g. "cpu"/"cuda" — presumably)

class RAGStatsResponse(BaseModel):
    """RAG statistics response model."""
    total_documents: int  # number of documents in the vector store
    embedding_model: str  # name of the embedding model in use
    chunk_size: int  # configured chunk size for document splitting
    recent_documents: List[Dict]  # most recently added documents; record shape defined elsewhere

class LearningStatsResponse(BaseModel):
    """Learning statistics response model."""
    total_learning_data: int  # total collected learning samples
    used_learning_data: int  # samples already consumed by training
    unused_learning_data: int  # samples not yet used for training
    positive_feedback: int  # count of positively rated samples
    negative_feedback: int  # count of negatively rated samples
    available_adapters: List[str]  # names of trained LoRA adapters on disk
    current_adapter: Optional[str] = None  # currently loaded adapter, if any

class PerformanceMetricsResponse(BaseModel):
    """Performance metrics response model.

    NOTE(review): under pydantic v2 the ``model_name`` field collides with
    the protected ``model_`` namespace and emits a warning — confirm the
    installed pydantic version / whether this is suppressed in config.
    """
    session_id: Optional[UUID4] = None  # session the metric belongs to, if any
    request_id: Optional[UUID4] = None  # individual request id, if tracked
    model_name: str  # model that served the request
    processing_time: float  # end-to-end processing duration; presumably seconds — confirm
    prompt_tokens: int  # tokens in the prompt
    completion_tokens: int  # tokens generated
    total_tokens: int  # prompt + completion tokens
    success: bool  # whether the request completed successfully
    error_message: Optional[str] = None  # failure description when success is False
    created_at: datetime  # when the metric was recorded; naive vs aware not enforced here

class FullTrainingCycleRequest(BaseModel):
    """Full training-cycle request model."""
    max_samples: Optional[int] = None  # cap on training samples; None = use all available
    adapter_name: Optional[str] = None  # name for the resulting adapter; None = server-chosen

class FullTrainingCycleResponse(BaseModel):
    """Full training-cycle response model."""
    success: bool  # whether the training cycle completed
    message: str  # human-readable status message
    adapter_name: Optional[str] = None  # name of the adapter produced, when successful

class HotReloadAdapterRequest(BaseModel):
    """Hot-reload adapter request model."""
    adapter_name: str  # adapter to load without restarting the model server

class HotReloadAdapterResponse(BaseModel):
    """Hot-reload adapter response model."""
    success: bool  # whether the adapter was loaded
    message: str  # human-readable status message

class SystemStatusResponse(BaseModel):
    """System status response model, aggregating subsystem reports.

    NOTE(review): the ``model`` field name is fine in pydantic v1 but sits
    inside the protected ``model_*``-adjacent namespace conventions of
    pydantic v2 (it shadows ``BaseModel``-related attribute naming) —
    confirm no warning/conflict with the installed pydantic version.
    """
    model: ModelInfoResponse  # LLM status
    rag: RAGStatsResponse  # retrieval subsystem status
    learning: LearningStatsResponse  # learning/feedback subsystem status
    database: Dict  # database status; shape defined by the producer, not constrained here
    api: Dict  # API-layer status; shape defined by the producer

class ErrorResponse(BaseModel):
    """Error response model."""
    detail: str  # human-readable error description
    error_code: Optional[int] = None  # application-specific numeric code, if assigned
    error_type: Optional[str] = None  # error category/class name, if assigned