from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Form, BackgroundTasks, status
from fastapi.responses import PlainTextResponse
from typing import List, Optional
from sqlalchemy.orm import Session
import os
import uuid
from datetime import datetime

from app.schemas import spider, common, job
from app.core.config import settings
from app.db.session import get_db
from app.crud.crud_spider import crud_spider  # import the shared instance directly
from app.crud.crud_job import crud_job
from app.services.spider_service import spider_service
from app.db.models import JobStatus, JobType

# NOTE: do not re-instantiate here — crud_spider is already imported above.
# crud_spider = CRUDSpider()

router = APIRouter()

@router.post("/", response_model=spider.SpiderRead, status_code=status.HTTP_201_CREATED)
async def create_spider(
    background_tasks: BackgroundTasks,
    db: Session = Depends(get_db),
    file: UploadFile = File(...),
    requirements: Optional[UploadFile] = None,
    name: str = Form(...),
    description: Optional[str] = Form(None)
):
    """Upload a new spider.

    Writes the uploaded code (and optional requirements file) into a freshly
    generated per-spider directory, then persists a DB record whose id and
    path fields point at those files.

    Raises:
        HTTPException 400: a spider with the same name already exists.
    """
    # Reject duplicate spider names up front.
    if crud_spider.get_by_name(db, name=name):
        raise HTTPException(status_code=400, detail="此名称的爬虫已存在")

    # Generate a unique id and make sure its directory exists.
    spider_id = str(uuid.uuid4())
    spider_dir = spider_service.ensure_spider_directory(spider_id)

    # Persist the uploaded spider code.
    code_path = os.path.join(spider_dir, "spider.py")
    with open(code_path, "wb") as f:
        f.write(await file.read())

    # Persist the optional requirements.txt, if one was uploaded.
    dependencies_path = None
    if requirements:
        dependencies_path = os.path.join(spider_dir, "requirements.txt")
        with open(dependencies_path, "wb") as f:
            f.write(await requirements.read())

    # Create the base record from the user-supplied metadata.
    spider_data = spider.SpiderCreate(
        name=name,
        description=description
    )
    spider_obj = crud_spider.create(db, obj_in=spider_data)

    # Attach the generated id and file paths. BUGFIX: capture and return the
    # update result — the original discarded it and returned the pre-update
    # object, which could be missing id/code_path if the CRUD layer returns a
    # refreshed instance.
    spider_obj = crud_spider.update(
        db,
        db_obj=spider_obj,
        obj_in={
            "id": spider_id,
            "code_path": code_path,
            "dependencies_path": dependencies_path
        }
    )

    return spider_obj

@router.get("/", response_model=List[spider.SpiderRead])
def get_spiders(
    skip: int = 0, 
    limit: int = 100,
    db: Session = Depends(get_db)
):
    """List all spiders, paginated via skip/limit."""
    spiders = crud_spider.get_multi(db, skip=skip, limit=limit)
    return spiders

@router.get("/{spider_id}", response_model=spider.SpiderRead)
def get_spider(
    spider_id: str,
    db: Session = Depends(get_db)
):
    """Fetch a single spider by id; 404 when it does not exist."""
    found = crud_spider.get(db, id=spider_id)
    if found is None:
        raise HTTPException(status_code=404, detail="爬虫不存在")
    return found

@router.put("/{spider_id}", response_model=spider.SpiderRead)
def update_spider(
    spider_id: str,
    spider_update: spider.SpiderUpdate,
    db: Session = Depends(get_db)
):
    """Update a spider's metadata (name, description)."""
    existing = crud_spider.get(db, id=spider_id)
    if existing is None:
        raise HTTPException(status_code=404, detail="爬虫不存在")

    # Guard against renaming onto a name another spider already uses.
    new_name = spider_update.name
    if new_name and new_name != existing.name and crud_spider.get_by_name(db, name=new_name):
        raise HTTPException(status_code=400, detail="此名称的爬虫已存在")

    return crud_spider.update(db, db_obj=existing, obj_in=spider_update)

@router.delete("/{spider_id}", response_model=common.Msg)
def delete_spider(
    spider_id: str,
    db: Session = Depends(get_db)
):
    """Delete a spider: its code files, associated schedules, and DB record.

    Raises:
        HTTPException 404: spider not found.
        HTTPException 500: the on-disk files could not be removed.
    """
    db_spider = crud_spider.get(db, id=spider_id)
    if not db_spider:
        raise HTTPException(status_code=404, detail="爬虫不存在")

    # Remove the on-disk files FIRST. BUGFIX: the original deleted schedules
    # before this step, so a file-system failure left schedules gone while
    # the spider record survived (partial delete). Failing here now aborts
    # the whole operation with no DB changes.
    try:
        spider_service.delete_spider_directory(spider_id)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"删除爬虫文件失败: {str(e)}")

    # Cascade: remove associated schedules, then the spider record itself.
    from app.crud.crud_schedule import crud_schedule
    crud_schedule.delete_by_spider_id(db, spider_id=spider_id)
    crud_spider.remove(db, id=spider_id)

    return {"message": "爬虫删除成功"}

@router.get("/{spider_id}/code", response_class=PlainTextResponse)
def get_spider_code(
    spider_id: str,
    db: Session = Depends(get_db)
):
    """Return the spider's Python source as plain text."""
    record = crud_spider.get(db, id=spider_id)
    if record is None:
        raise HTTPException(status_code=404, detail="爬虫不存在")

    # Read the code file; None signals a missing file.
    source = spider_service.read_spider_file(record.code_path)
    if source is None:
        raise HTTPException(status_code=404, detail="爬虫代码文件不存在")
    return source

@router.put("/{spider_id}/code", response_model=common.Msg)
async def update_spider_code(
    spider_id: str,
    code: str,
    db: Session = Depends(get_db)
):
    """Overwrite the spider's Python source with the supplied code.

    Raises:
        HTTPException 404: spider not found.
        HTTPException 500: the code file could not be written.
    """
    db_spider = crud_spider.get(db, id=spider_id)
    if not db_spider:
        raise HTTPException(status_code=404, detail="爬虫不存在")

    # BUGFIX: keep the try narrow — the original wrapped the DB update too,
    # so a database error was mislabelled as a file-write failure. OSError
    # is the exception family raised by open()/write().
    try:
        with open(db_spider.code_path, "w", encoding="utf-8") as f:
            f.write(code)
    except OSError as e:
        raise HTTPException(status_code=500, detail=f"写入代码文件失败: {str(e)}")

    # Bump the record's modification timestamp.
    crud_spider.update(db, db_obj=db_spider, obj_in={"updated_at": datetime.now()})

    return {"message": "代码更新成功"}

@router.get("/{spider_id}/dependencies", response_class=PlainTextResponse)
def get_spider_dependencies(
    spider_id: str,
    db: Session = Depends(get_db)
):
    """Return the spider's requirements.txt content, or "" when absent."""
    record = crud_spider.get(db, id=spider_id)
    if record is None:
        raise HTTPException(status_code=404, detail="爬虫不存在")

    # No dependency file registered -> no dependencies.
    if not record.dependencies_path:
        return ""

    # A recorded path whose file is missing is also reported as empty.
    content = spider_service.read_spider_file(record.dependencies_path)
    return "" if content is None else content

@router.put("/{spider_id}/dependencies", response_model=common.Msg)
async def update_spider_dependencies(
    spider_id: str,
    dependencies: str,
    db: Session = Depends(get_db)
):
    """Create or overwrite the spider's requirements.txt.

    Raises:
        HTTPException 404: spider not found.
        HTTPException 500: the requirements file could not be written.
    """
    db_spider = crud_spider.get(db, id=spider_id)
    if not db_spider:
        raise HTTPException(status_code=404, detail="爬虫不存在")

    # When no path is recorded yet, default the file next to the spider code.
    dependencies_path = db_spider.dependencies_path
    if not dependencies_path:
        spider_dir = os.path.dirname(db_spider.code_path)
        dependencies_path = os.path.join(spider_dir, "requirements.txt")

    # BUGFIX: keep the try narrow — the original wrapped the DB update too,
    # so a database error was mislabelled as a file-write failure.
    try:
        with open(dependencies_path, "w", encoding="utf-8") as f:
            f.write(dependencies)
    except OSError as e:
        raise HTTPException(status_code=500, detail=f"写入依赖文件失败: {str(e)}")

    # Record the (possibly newly created) path and bump the timestamp.
    crud_spider.update(
        db,
        db_obj=db_spider,
        obj_in={
            "updated_at": datetime.now(),
            "dependencies_path": dependencies_path
        }
    )

    return {"message": "依赖更新成功"}

@router.post("/{spider_id}/install_dependencies", response_model=job.JobRead, status_code=202)
def install_dependencies(
    spider_id: str,
    background_tasks: BackgroundTasks,
    db: Session = Depends(get_db)
):
    """Kick off dependency installation for a spider as a background job."""
    if not crud_spider.get(db, id=spider_id):
        raise HTTPException(status_code=404, detail="爬虫不存在")

    # At most one installation job may run per spider at any time.
    if crud_job.get_active_dependency_jobs(db, spider_id=spider_id):
        raise HTTPException(status_code=409, detail="依赖安装任务已在进行中")

    # Register the job record before scheduling the work.
    new_job = crud_job.create(
        db,
        obj_in=job.JobCreate(
            spider_id=spider_id,
            job_type=job.JobType.DEPENDENCY_INSTALLATION
        )
    )

    # The actual installation runs after the 202 response is sent.
    background_tasks.add_task(
        spider_service.install_dependencies,
        spider_id=spider_id,
        job_id=new_job.id
    )

    return new_job