from pydantic import BaseModel, HttpUrl, Field, ConfigDict
from typing import Optional, Dict, Any, List
from datetime import datetime
from enum import Enum


class CrawlerJobStatus(str, Enum):
    """Lifecycle states of a crawler job.

    Mixes in ``str`` so members compare equal to their plain string values
    and serialize naturally in JSON API responses.
    """
    PENDING = "pending"      # queued, not yet started
    RUNNING = "running"      # crawl is in progress
    COMPLETED = "completed"  # finished without error
    FAILED = "failed"        # aborted with an error
    CANCELLED = "cancelled"  # stopped on request


class CrawlerConfigBase(BaseModel):
    """Crawler configuration shared by all crawl-related request models.

    Every field is optional; ``None`` (or the stated default) lets the
    crawler fall back to its built-in behavior.
    """

    # -- crawl limits --
    max_pages: Optional[int] = Field(10, description="最大爬取页面数")
    max_depth: Optional[int] = Field(3, description="最大爬取深度")
    delay: Optional[float] = Field(1.0, description="请求间隔(秒)")

    # -- JavaScript rendering --
    javascript_enabled: Optional[bool] = Field(True, description="是否启用JavaScript渲染")
    wait_for: Optional[str] = Field(None, description="等待特定元素加载")

    # -- content extraction toggles --
    extract_text: Optional[bool] = Field(True, description="是否提取文本内容")
    extract_links: Optional[bool] = Field(True, description="是否提取链接")
    extract_images: Optional[bool] = Field(True, description="是否提取图片")
    extract_metadata: Optional[bool] = Field(True, description="是否提取元数据")

    # -- URL filtering --
    include_patterns: Optional[List[str]] = Field(None, description="包含URL模式")
    exclude_patterns: Optional[List[str]] = Field(None, description="排除URL模式")

    # -- custom extraction rules --
    css_selectors: Optional[Dict[str, str]] = Field(None, description="CSS选择器规则")
    xpath_selectors: Optional[Dict[str, str]] = Field(None, description="XPath选择器规则")

    # -- HTTP request options --
    headers: Optional[Dict[str, str]] = Field(None, description="自定义请求头")
    user_agent: Optional[str] = Field(None, description="自定义User-Agent")
    proxy: Optional[str] = Field(None, description="代理设置")

    # -- output options --
    save_html: Optional[bool] = Field(False, description="是否保存原始HTML")
    output_format: Optional[str] = Field("json", description="输出格式")


class CrawlerJobCreate(BaseModel):
    """Request payload for creating a new crawler job."""

    # Human-readable job name; required, 1-255 characters.
    name: str = Field(..., description="任务名称", min_length=1, max_length=255)
    # Optional free-form description of the job.
    description: Optional[str] = Field(None, description="任务描述")
    # Entry-point URL, validated as a well-formed HTTP(S) URL.
    url: HttpUrl = Field(..., description="目标URL")
    # Per-job crawler settings; server defaults apply when omitted.
    config: Optional[CrawlerConfigBase] = Field(None, description="爬虫配置")


class CrawlerJobUpdate(BaseModel):
    """Request payload for updating an existing crawler job.

    All fields are optional; omitted fields are left unchanged.
    """

    # New job name, 1-255 characters when provided.
    name: Optional[str] = Field(None, description="任务名称", min_length=1, max_length=255)
    # Replacement description.
    description: Optional[str] = Field(None, description="任务描述")
    # Replacement crawler configuration.
    config: Optional[CrawlerConfigBase] = Field(None, description="爬虫配置")


class CrawledPageResponse(BaseModel):
    """Crawled page response model (爬取页面响应模型).

    Serialized view of a single page fetched during a crawl job.
    """
    # Allow construction directly from ORM objects (Pydantic v2).
    model_config = ConfigDict(from_attributes=True)

    id: int
    url: str  # URL of the fetched page
    title: Optional[str] = None  # page title, when extracted
    content: Optional[str] = None  # extracted text content
    status_code: Optional[int] = None  # HTTP response status
    content_type: Optional[str] = None  # response Content-Type
    content_length: Optional[int] = None  # presumably bytes — TODO confirm against writer
    extracted_data: Optional[Dict[str, Any]] = None  # results of custom selector rules
    links: Optional[List[str]] = None  # links found on the page
    images: Optional[List[str]] = None  # image URLs found on the page
    crawled_at: datetime  # when the page was fetched


class CrawlerJobResponse(BaseModel):
    """Crawler job response model (爬虫任务响应模型).

    Serialized view of one crawl job, including progress counters and
    lifecycle timestamps.
    """
    # Allow construction directly from ORM objects (Pydantic v2).
    model_config = ConfigDict(from_attributes=True)

    id: int
    user_id: int  # owner of the job
    name: str
    description: Optional[str] = None
    url: str  # target URL, stored as a plain string
    status: CrawlerJobStatus  # current lifecycle state
    config: Optional[Dict[str, Any]] = None  # stored crawler configuration
    result_data: Optional[Dict[str, Any]] = None  # aggregated crawl results
    error_message: Optional[str] = None  # presumably set when status is FAILED — verify against writer
    pages_crawled: int = 0
    total_content_size: int = 0  # presumably bytes — TODO confirm units
    started_at: Optional[datetime] = None  # None until the job starts
    completed_at: Optional[datetime] = None  # None until the job ends
    created_at: datetime
    updated_at: Optional[datetime] = None


class CrawlerJobListResponse(BaseModel):
    """Crawler job list response model (爬虫任务列表响应模型).

    Paginated collection of crawler jobs.
    """
    jobs: List[CrawlerJobResponse]  # jobs on the current page
    total: int  # total number of matching jobs across all pages
    page: int  # current page number
    size: int  # page size used for this query


class CrawlerJobDetailResponse(CrawlerJobResponse):
    """Crawler job detail response model (爬虫任务详情响应模型).

    Extends ``CrawlerJobResponse`` with the individual pages fetched by
    the job.
    """
    # default_factory instead of a mutable class-level [] literal: Pydantic
    # copies defaults per instance either way, but this is the documented
    # idiom and avoids mutable-default lint warnings.
    crawled_pages: List[CrawledPageResponse] = Field(default_factory=list)


class ScheduledCrawlerTaskCreate(BaseModel):
    """Request payload for creating a recurring (cron-scheduled) crawl task."""

    # Task name; required, 1-255 characters.
    name: str = Field(..., description="任务名称", min_length=1, max_length=255)
    # Optional free-form description.
    description: Optional[str] = Field(None, description="任务描述")
    # Target URL, validated as a well-formed HTTP(S) URL.
    url: HttpUrl = Field(..., description="目标URL")
    # Schedule in cron syntax; required.
    cron_expression: str = Field(..., description="Cron表达式")
    # Timezone name used to evaluate the cron expression.
    timezone: Optional[str] = Field("UTC", description="时区")
    # Per-task crawler settings; server defaults apply when omitted.
    config: Optional[CrawlerConfigBase] = Field(None, description="爬虫配置")


class ScheduledCrawlerTaskUpdate(BaseModel):
    """Request payload for updating a scheduled crawl task.

    All fields are optional; omitted fields keep their current values.
    """

    # New task name, 1-255 characters when provided.
    name: Optional[str] = Field(None, description="任务名称", min_length=1, max_length=255)
    # Replacement description.
    description: Optional[str] = Field(None, description="任务描述")
    # Replacement cron schedule.
    cron_expression: Optional[str] = Field(None, description="Cron表达式")
    # Replacement timezone for the cron schedule.
    timezone: Optional[str] = Field(None, description="时区")
    # Replacement crawler configuration.
    config: Optional[CrawlerConfigBase] = Field(None, description="爬虫配置")
    # Toggle scheduling on/off without deleting the task.
    is_active: Optional[bool] = Field(None, description="是否激活")


class ScheduledCrawlerTaskResponse(BaseModel):
    """Scheduled crawler task response model (定时爬虫任务响应模型).

    Serialized view of a recurring crawl task with its execution
    statistics and scheduling state.
    """
    # Allow construction directly from ORM objects (Pydantic v2).
    model_config = ConfigDict(from_attributes=True)

    id: int
    user_id: int  # owner of the task
    name: str
    description: Optional[str] = None
    url: str  # target URL, stored as a plain string
    cron_expression: str  # schedule in cron syntax
    timezone: str = "UTC"  # timezone used to evaluate the cron expression
    config: Optional[Dict[str, Any]] = None  # stored crawler configuration
    is_active: bool = True  # False suspends scheduling without deleting the task
    total_executions: int = 0
    successful_executions: int = 0
    failed_executions: int = 0
    last_execution_at: Optional[datetime] = None  # None until first run
    next_execution_at: Optional[datetime] = None  # None when inactive — TODO confirm against scheduler
    created_at: datetime
    updated_at: Optional[datetime] = None


class CrawlerExecuteRequest(BaseModel):
    """Request payload for executing a crawl immediately."""

    # Target URL, validated as a well-formed HTTP(S) URL.
    url: HttpUrl = Field(..., description="目标URL")
    # Optional crawler settings; server defaults apply when omitted.
    config: Optional[CrawlerConfigBase] = Field(None, description="爬虫配置")


class CrawlerExecuteResponse(BaseModel):
    """Immediate crawl execution response model (立即执行爬虫响应模型)."""
    success: bool  # True when the crawl completed without error
    data: Optional[Dict[str, Any]] = None  # crawl results on success
    error: Optional[str] = None  # error description on failure
    pages_crawled: int = 0
    execution_time: float = 0.0  # presumably seconds — TODO confirm units