from sqlalchemy import Column, Integer, String, DateTime, Text, Boolean, ForeignKey, JSON
from sqlalchemy.sql.functions import func
from sqlalchemy.orm import relationship
from enum import Enum

from app.core.database import Base


class CrawlerJobStatus(str, Enum):
    """Lifecycle states of a crawler job.

    Subclasses ``str`` so members compare equal to their plain string
    values and serialize naturally into the VARCHAR ``status`` column.
    """

    PENDING = "pending"      # queued, not yet started
    RUNNING = "running"      # currently executing
    COMPLETED = "completed"  # finished successfully
    FAILED = "failed"        # finished with an error
    CANCELLED = "cancelled"  # stopped by request before completion


class CrawlerJob(Base):
    """A single web-crawl job owned by a user.

    Stores the job's configuration, lifecycle status, raw results and
    execution statistics. Pages fetched during the run live in the
    related ``CrawledPage`` rows and are deleted together with the job
    (``cascade="all, delete-orphan"``).
    """
    __tablename__ = "crawler_jobs"

    id = Column(Integer, primary_key=True, index=True)
    # Owning user; indexed because jobs are typically listed per user.
    user_id = Column(Integer, ForeignKey("users.id"), nullable=False, index=True)

    # Basic job information
    name = Column(String(255), nullable=False)
    description = Column(Text)
    url = Column(String(2048), nullable=False)  # seed URL for the crawl
    # Bind the enum's plain string value explicitly: passing the enum
    # member itself relies on driver-specific handling of str subclasses.
    status = Column(String(20), default=CrawlerJobStatus.PENDING.value, nullable=False)

    # Crawler configuration
    config = Column(JSON)  # crawler configuration parameters

    # Execution results
    result_data = Column(JSON)  # crawl result payload
    error_message = Column(Text)  # failure details when status is "failed"

    # Statistics
    pages_crawled = Column(Integer, default=0)
    total_content_size = Column(Integer, default=0)  # total content size (bytes)

    # Timestamps
    started_at = Column(DateTime(timezone=True))
    completed_at = Column(DateTime(timezone=True))
    created_at = Column(DateTime(timezone=True), server_default=func.now())
    # server_default added so the field is populated at insert time instead
    # of remaining NULL until the row's first update.
    updated_at = Column(
        DateTime(timezone=True), server_default=func.now(), onupdate=func.now()
    )

    # Relationships
    user = relationship("User", back_populates="crawler_jobs")
    crawled_pages = relationship("CrawledPage", back_populates="job", cascade="all, delete-orphan")

    def __repr__(self):
        return f"<CrawlerJob(id={self.id}, name='{self.name}', status='{self.status}')>"


class CrawledPage(Base):
    """One page fetched during a crawl job.

    Holds the extracted text, optional raw HTML, HTTP response metadata,
    structured data produced by extraction rules, and any links/images
    found on the page. Each row belongs to exactly one ``CrawlerJob``.
    """
    __tablename__ = "crawled_pages"

    id = Column(Integer, primary_key=True, index=True)
    # Parent crawl job this page was fetched by.
    job_id = Column(Integer, ForeignKey("crawler_jobs.id"), nullable=False)

    # Page content
    url = Column(String(2048), nullable=False)
    title = Column(String(500))
    content = Column(Text)  # extracted/cleaned text content
    raw_html = Column(Text)  # original HTML source (optional)

    # HTTP response metadata
    status_code = Column(Integer)
    content_type = Column(String(100))
    content_length = Column(Integer)

    # Structured data produced by the job's extraction rules
    extracted_data = Column(JSON)

    # Outbound references discovered on the page
    links = Column(JSON)  # hyperlinks found on the page
    images = Column(JSON)  # image URLs found on the page

    # Timestamp of the fetch
    crawled_at = Column(DateTime(timezone=True), server_default=func.now())

    # Relationships
    job = relationship("CrawlerJob", back_populates="crawled_pages")

    def __repr__(self):
        return f"<CrawledPage(id={self.id}, url='{self.url}', title='{self.title}')>"


class ScheduledCrawlerTask(Base):
    """A recurring crawl definition driven by a cron schedule.

    Stores the cron expression, target URL and crawler configuration,
    plus aggregate execution counters and the last/next run times.
    """
    __tablename__ = "scheduled_crawler_tasks"

    id = Column(Integer, primary_key=True, index=True)
    # Owning user; indexed because tasks are typically listed per user.
    user_id = Column(Integer, ForeignKey("users.id"), nullable=False, index=True)

    # Task information
    name = Column(String(255), nullable=False)
    description = Column(Text)
    url = Column(String(2048), nullable=False)

    # Schedule configuration
    cron_expression = Column(String(100), nullable=False)  # cron expression
    timezone = Column(String(50), default="UTC")  # IANA timezone name for the schedule

    # Crawler configuration
    config = Column(JSON)  # crawler configuration parameters

    # Enabled flag: inactive tasks are kept but not scheduled
    is_active = Column(Boolean, default=True)

    # Execution statistics
    total_executions = Column(Integer, default=0)
    successful_executions = Column(Integer, default=0)
    failed_executions = Column(Integer, default=0)
    last_execution_at = Column(DateTime(timezone=True))
    next_execution_at = Column(DateTime(timezone=True))

    # Timestamps
    created_at = Column(DateTime(timezone=True), server_default=func.now())
    # server_default added so the field is populated at insert time instead
    # of remaining NULL until the row's first update.
    updated_at = Column(
        DateTime(timezone=True), server_default=func.now(), onupdate=func.now()
    )

    # Relationships
    user = relationship("User", back_populates="scheduled_crawler_tasks")

    def __repr__(self):
        return f"<ScheduledCrawlerTask(id={self.id}, name='{self.name}', is_active={self.is_active})>"