from pydantic import BaseModel, Field, field_validator, ConfigDict
from typing import Optional, List, Literal, Annotated
from datetime import datetime

# Closed set of supported question types; a Literal makes any other
# string a validation error rather than silently accepted data.
QuestionType = Literal["single_choice", "fill_blank", "short_answer"]


class QuestionBase(BaseModel):
    """Field set shared by the create-input and read-output question schemas."""

    content: str = Field(min_length=5, max_length=2000, description="题目内容，至少5个字符")
    question_type: QuestionType = Field(description="题型")
    # Difficulty is a float so half-steps (e.g. 2.5) are representable.
    difficulty: float = Field(ge=0, le=5, description="难度系数，0-5之间")
    answer: str = Field(min_length=1, description="标准答案")
    # Explanation is the only optional field on the base schema.
    analysis: Optional[str] = Field(default=None, max_length=5000, description="题目解析（可选）")
    grade: str = Field(min_length=2, max_length=50, description="所属年纪，如高一")
    subject: str = Field(min_length=2, max_length=50, description="课程名字，如数学")


class QuestionCreate(QuestionBase):
    """Input schema for creating a question.

    Identical to ``QuestionBase``: the ID and timestamps are assigned by
    the database, so they are deliberately absent here.
    """
    pass


class QuestionUpdate(BaseModel):
    """Input schema for partial updates: every field is optional.

    Fields left as ``None`` are interpreted as "no change"; values that
    are provided must still satisfy the same constraints as on creation.
    """

    content: Optional[str] = Field(default=None, min_length=5, max_length=2000)
    question_type: Optional[QuestionType] = None
    difficulty: Optional[float] = Field(default=None, ge=0, le=5)
    answer: Optional[str] = Field(default=None, min_length=1)
    analysis: Optional[str] = Field(default=None, max_length=5000)
    grade: Optional[str] = Field(default=None, min_length=2, max_length=50)
    subject: Optional[str] = Field(default=None, min_length=2, max_length=50)

    # Reject unknown keys so typos don't silently become no-ops, and
    # strip surrounding whitespace from all string inputs.
    model_config = ConfigDict(extra="forbid", str_strip_whitespace=True)


class QuestionInDBBase(QuestionBase):
    """Serialization base for questions persisted in the database.

    Extends ``QuestionBase`` with the DB-assigned primary key, the
    create/update timestamps, and the (nullable) creator identifier.
    """
    id: int                  # database primary key
    created_at: datetime
    updated_at: datetime
    # Pydantic v2 no longer treats ``Optional[...]`` as implicitly
    # defaulting to None (that was v1 behavior), so without an explicit
    # default this field would be *required*. The explicit ``= None``
    # keeps it genuinely optional/nullable as intended.
    created_by: Optional[str] = None

    # Allow constructing this model directly from ORM instances
    # (attribute access instead of dict keys).
    model_config = ConfigDict(from_attributes=True)


class Question(QuestionInDBBase):
    """Output schema returned to API clients.

    Carries the full question record; inherits everything from
    ``QuestionInDBBase`` without modification.
    """
    pass


class QuestionGenerateRequest(BaseModel):
    """Request parameters for AI-driven question generation.

    ``question_types`` and ``counts`` are parallel lists; a validator
    enforces that they have the same length.
    """
    outline: Annotated[str, Field(min_length=20, description="教学大纲文本，至少20个字符")]
    # Pydantic v2 renamed the list-size constraints: ``min_items``/``max_items``
    # (v1) became ``min_length``/``max_length``.
    question_types: Annotated[List[QuestionType], Field(min_length=1, description="需要生成的题型列表")]
    counts: Annotated[List[int], Field(min_length=1, description="对应题型的数量，与question_types顺序一致")]
    # In v2 a default may NOT be set inside ``Field`` within ``Annotated``
    # (it raises PydanticUserError), so the default lives on the attribute.
    # Pydantic deep-copies defaults per instance, so the mutable list is safe.
    difficulty_range: Annotated[List[float], Field(min_length=2, max_length=2, description="难度范围，如[1,3]")] = [1.0, 3.0]
    major: Annotated[str, Field(min_length=2, description="所属专业")]
    course: Annotated[str, Field(min_length=2, description="所属课程")]

    @field_validator("counts")
    @classmethod
    def counts_must_match_types(cls, v, info):
        """Reject requests where counts and question_types differ in length.

        Uses the v2 ``ValidationInfo`` API: previously-validated fields are
        available via ``info.data`` (``question_types`` is declared before
        ``counts``, so it is present unless it already failed validation).
        """
        types = info.data.get("question_types")
        if types is not None and len(v) != len(types):
            raise ValueError("题型列表与数量列表长度必须一致")
        return v

    # Unknown keys in the request body are an error, not ignored.
    model_config = ConfigDict(extra="forbid")


class QuestionGenerateResponse(BaseModel):
    """Response payload for AI question generation."""

    question_set_id: str       # unique identifier of the generated set
    total_count: int           # number of questions generated
    questions: List[Question]  # the generated questions themselves

    # Permit building the response straight from ORM/domain objects.
    model_config = ConfigDict(from_attributes=True)
