# encoding: utf-8
# @File  : spiders.py
# @Author: shaoyun
# @Date  : 2025/05/11
import os
import uuid
from datetime import datetime
from typing import Dict, Any, Optional

from bson import ObjectId
from fastapi import APIRouter, Depends, HTTPException, Path, UploadFile, File, Form
from fastapi.responses import FileResponse
from loguru import logger
from sqlalchemy.orm import Session
from starlette.background import BackgroundTask
from starlette.status import HTTP_404_NOT_FOUND
from motor.motor_asyncio import AsyncIOMotorGridFSBucket
import io

from app.api.dependencies import get_current_user, get_db
from app.core.response import get_response
from app.core.status_codes import SUCCESS, ERROR
from app.db.mongo_manager import MongoDB
from app.db.redis_manager import RedisClient
from app.models.user import User
from app.schemas.spider import SpiderInDB, SpiderWithStats

router = APIRouter()


@router.post("/create", response_model=Dict[str, Any])
async def create_spider(
        name: str = Form(...),
        description: Optional[str] = Form(None),
        language: str = Form("python"),
        file: UploadFile = File(...),
        db: Session = Depends(get_db),
        current_user: User = Depends(get_current_user)
):
    """Create a new spider script.

    Stores the uploaded ZIP archive in GridFS, persists the spider
    metadata document to the ``spiders`` collection, and publishes a
    ``file.upload`` event on Redis so downstream workers can react.

    Returns the standard response envelope with the new spider document
    on success, or an ERROR envelope when the upload is not a ZIP file.
    """
    spider_id = str(uuid.uuid4())
    now = datetime.utcnow()

    # Validate the upload. filename can be None for malformed multipart
    # requests (would crash .endswith), and the extension check should
    # not be case-sensitive ("DATA.ZIP" is a valid ZIP upload).
    if not file.filename or not file.filename.lower().endswith('.zip'):
        return get_response(ERROR, data=None)

    # 获取数据库对象
    mongo_db = await MongoDB.get_database()

    # GridFS bucket used for all spider script archives.
    fs = AsyncIOMotorGridFSBucket(mongo_db, bucket_name="files")

    # Read the whole upload into memory before streaming it to GridFS.
    file_content = await file.read()

    # Upload to GridFS; metadata links the stored file back to its
    # spider and owner for later lookup/cleanup.
    file_id = await fs.upload_from_stream(
        f"{spider_id}.zip",
        io.BytesIO(file_content),
        metadata={
            "spider_id": spider_id,
            "user_id": current_user.id,
            "filename": file.filename
        }
    )

    # Persist the spider metadata document.
    spider_doc = {
        "_id": spider_id,
        "name": name,
        "description": description,
        "language": language,
        "file_id": str(file_id),  # GridFS file id (ObjectId as str)
        "user_id": current_user.id,
        "version": "1.0.0",
        "created_at": now,
        "updated_at": now
    }
    await MongoDB.insert_one("spiders", spider_doc)

    # Notify listeners that a new script archive is available.
    await RedisClient.publish_event("file.upload", {
        "spider_id": spider_id,
        "file_id": str(file_id),
        "action": "create"
    })

    return get_response(SUCCESS, data=spider_doc)

@router.get("/list", response_model=Dict[str, Any])
async def get_spiders(
        skip: int = 0,
        limit: int = 100,
        db: Session = Depends(get_db),
        current_user: User = Depends(get_current_user)
):
    """List spider scripts with pagination.

    Admin users see every spider; regular users only see their own.
    Returns a payload of the shape ``{items, total, page, limit}``.
    """
    is_admin = current_user.role.value == "admin"
    query = {} if is_admin else {"user_id": current_user.id}
    spiders = await MongoDB.find_many("spiders", query, skip=skip, limit=limit)

    spider_list = []
    for spider in spiders:
        spider_data = {
            "id": spider["_id"],
            "name": spider["name"],
            "description": spider.get("description"),
            "language": spider["language"],
            "user_id": spider["user_id"],
            "version": spider["version"],
            "created_at": spider["created_at"],
            "updated_at": spider["updated_at"]
        }

        # Compatibility: prefer the GridFS file_id; fall back to the
        # legacy filesystem path for documents created by older versions.
        if "file_id" in spider:
            spider_data["file_id"] = spider["file_id"]
        elif "path" in spider:
            spider_data["path"] = spider["path"]

        spider_list.append(spider_data)

    # Total matching documents, for the pagination footer.
    total = await MongoDB.count_documents("spiders", query)

    # Guard against limit <= 0, which would otherwise raise
    # ZeroDivisionError when deriving the page number (limit is a
    # client-supplied query parameter).
    page = (skip // limit) + 1 if limit > 0 else 1

    # Shape expected by the frontend.
    return get_response(SUCCESS, data={
        "items": spider_list,
        "total": total,
        "page": page,
        "limit": limit
    })

@router.get("/{spider_id}", response_model=SpiderInDB)
async def get_spider(
    spider_id: str = Path(...),
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Return the details of a single spider script.

    404 if the spider does not exist; 403 if the caller is neither the
    owner nor an admin.
    """
    doc = await MongoDB.find_one("spiders", {"_id": spider_id})
    if doc is None:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Spider not found")

    # Only the owner or an admin may read the spider.
    if current_user.role.value != "admin" and doc["user_id"] != current_user.id:
        raise HTTPException(status_code=403, detail="Not authorized to access this spider")

    return SpiderInDB(
        id=doc["_id"],
        name=doc["name"],
        description=doc.get("description"),
        language=doc["language"],
        path=doc.get("path"),        # legacy filesystem storage, may be None
        file_id=doc.get("file_id"),  # GridFS storage, may be None
        user_id=doc["user_id"],
        version=doc["version"],
        created_at=doc["created_at"],
        updated_at=doc["updated_at"]
    )


# NOTE: update_spider below handles file replacement via GridFS.

@router.put("/{spider_id}/update", response_model=SpiderInDB)
async def update_spider(
        spider_id: str = Path(...),
        name: Optional[str] = Form(None),
        description: Optional[str] = Form(None),
        language: Optional[str] = Form(None),
        file: Optional[UploadFile] = File(None),
        db: Session = Depends(get_db),
        current_user: User = Depends(get_current_user)
):
    """Update a spider script's metadata and/or its ZIP archive.

    Only fields that are provided are changed. Uploading a new file
    replaces the old GridFS file (best-effort delete of the previous
    one) and publishes a ``file.upload`` event. Any change bumps the
    patch component of the semantic version.

    404 if the spider does not exist; 403 if the caller is neither the
    owner nor an admin; 400 if the uploaded file is not a ZIP.
    """
    spider = await MongoDB.find_one("spiders", {"_id": spider_id})

    if not spider:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Spider not found")

    is_admin = current_user.role.value == "admin"
    if not is_admin and spider["user_id"] != current_user.id:
        raise HTTPException(status_code=403, detail="Not authorized to update this spider")

    update_data = {}
    if name is not None:
        update_data["name"] = name
    if description is not None:
        update_data["description"] = description
    if language is not None:
        update_data["language"] = language

    if file is not None:
        # filename can be None for malformed multipart requests (would
        # crash .endswith); the extension check is case-insensitive.
        if not file.filename or not file.filename.lower().endswith('.zip'):
            raise HTTPException(status_code=400, detail="Only ZIP files are allowed")

        # Upload replacement archive via GridFS.
        mongo_db = await MongoDB.get_database()
        fs = AsyncIOMotorGridFSBucket(mongo_db, bucket_name="files")

        # Read the whole upload into memory before streaming to GridFS.
        file_content = await file.read()

        # Best-effort delete of the previous GridFS file so it does not
        # become orphaned; a failure here must not block the update.
        if spider.get("file_id"):
            try:
                old_file_id = ObjectId(spider["file_id"])
                await fs.delete(old_file_id)
            except Exception as e:
                logger.warning(f"Failed to delete old file: {e}")

        # Upload the new archive.
        new_file_id = await fs.upload_from_stream(
            f"{spider_id}.zip",
            io.BytesIO(file_content),
            metadata={
                "spider_id": spider_id,
                "user_id": current_user.id,
                "filename": file.filename
            }
        )

        update_data["file_id"] = str(new_file_id)

        # Notify listeners that the script archive changed.
        await RedisClient.publish_event("file.upload", {
            "spider_id": spider_id,
            "file_id": str(new_file_id),
            "action": "update"
        })

    if update_data:
        # Bump the patch component ("x.y.z" -> "x.y.z+1"). A malformed
        # stored version must not 500 the request: leave it unchanged
        # and log, instead of letting int() raise ValueError.
        current_version = spider["version"]
        version_parts = current_version.split(".")
        try:
            version_parts[-1] = str(int(version_parts[-1]) + 1)
            update_data["version"] = ".".join(version_parts)
        except ValueError:
            logger.warning(f"Cannot bump malformed version '{current_version}' for spider {spider_id}")
        update_data["updated_at"] = datetime.utcnow()
        await MongoDB.update_one("spiders", {"_id": spider_id}, update_data)

        # Re-read so the response reflects the persisted state.
        spider = await MongoDB.find_one("spiders", {"_id": spider_id})

    return SpiderInDB(
        id=spider["_id"],
        name=spider["name"],
        description=spider.get("description"),
        language=spider["language"],
        path=spider.get("path"),  # legacy filesystem storage
        file_id=spider.get("file_id"),  # GridFS storage
        user_id=spider["user_id"],
        version=spider["version"],
        created_at=spider["created_at"],
        updated_at=spider["updated_at"]
    )
@router.delete("/{spider_id}/delete", response_model=Dict[str, Any])
async def delete_spider(
    spider_id: str = Path(...),
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Delete a spider script and its stored archive.

    Removes the metadata document and then best-effort deletes the
    associated GridFS file (or legacy on-disk file), so deleted spiders
    do not leave orphaned archives behind.

    404 if the spider does not exist; 403 if the caller is neither the
    owner nor an admin; 500 if the metadata delete fails.
    """
    spider = await MongoDB.find_one("spiders", {"_id": spider_id})

    if not spider:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Spider not found")

    is_admin = current_user.role.value == "admin"
    if not is_admin and spider["user_id"] != current_user.id:
        raise HTTPException(status_code=403, detail="Not authorized to delete this spider")

    deleted_count = await MongoDB.delete_one("spiders", {"_id": spider_id})

    if deleted_count == 0:
        raise HTTPException(status_code=500, detail="Failed to delete spider")

    # Best-effort cleanup of the stored archive; a failure here must not
    # fail the request since the metadata is already gone.
    if spider.get("file_id"):
        try:
            mongo_db = await MongoDB.get_database()
            fs = AsyncIOMotorGridFSBucket(mongo_db, bucket_name="files")
            await fs.delete(ObjectId(spider["file_id"]))
        except Exception as e:
            logger.warning(f"Failed to delete spider file from GridFS: {e}")
    elif spider.get("path"):
        # Legacy filesystem storage.
        try:
            if os.path.exists(spider["path"]):
                os.remove(spider["path"])
        except OSError as e:
            logger.warning(f"Failed to delete spider file from disk: {e}")

    return {"success": True, "message": "Spider deleted successfully"}


@router.get("/{spider_id}/download", response_class=FileResponse)
async def download_spider(
        spider_id: str = Path(...),
        db: Session = Depends(get_db),
        current_user: User = Depends(get_current_user)
):
    """Download the spider's script archive as a ZIP file.

    Two storage backends are supported: GridFS (new documents, keyed by
    ``file_id``) and the local filesystem (legacy documents, keyed by
    ``path``). 404 if the spider or its file is missing; 403 if the
    caller is neither the owner nor an admin.
    """
    spider = await MongoDB.find_one("spiders", {"_id": spider_id})
    if not spider:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Spider not found")

    if spider["user_id"] != current_user.id and current_user.role.value != "admin":
        raise HTTPException(status_code=403, detail="Permission denied")

    # Decide between GridFS and the legacy local-filesystem backend.
    if spider.get("file_id"):
        # GridFS-backed file.
        mongo_db = await MongoDB.get_database()
        fs = AsyncIOMotorGridFSBucket(mongo_db, bucket_name="files")

        try:
            # Download the whole file into memory via download_to_stream.
            file_id = ObjectId(spider["file_id"])

            # In-memory buffer that receives the file contents.
            file_stream = io.BytesIO()

            # Stream the GridFS file into the buffer.
            await fs.download_to_stream(file_id, file_stream)

            # Raw bytes of the archive.
            file_content = file_stream.getvalue()

            # FileResponse needs a real path, so spill the bytes to a
            # temporary file (delete=False: it must outlive this block).
            import tempfile
            with tempfile.NamedTemporaryFile(delete=False, suffix=".zip") as tmp_file:
                tmp_file.write(file_content)
                tmp_file_path = tmp_file.name

            return FileResponse(
                path=tmp_file_path,
                filename=f"{spider['name']}.zip",
                media_type="application/zip",
                # BackgroundTask removes the temp file after the response
                # has been sent, so temp files do not accumulate.
                background=BackgroundTask(lambda: os.unlink(tmp_file_path))
            )

        except Exception as e:
            logger.error(f"Error downloading file from GridFS: {e}")
            raise HTTPException(status_code=500, detail="Failed to download file")

    elif spider.get("path"):
        # Legacy local-filesystem storage.
        file_path = spider["path"]
        if not os.path.exists(file_path):
            raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="File not found")

        return FileResponse(
            path=file_path,
            filename=f"{spider['name']}.zip",
            media_type="application/zip"
        )
    else:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="No file associated with this spider")
@router.get("/{spider_id}/stats", response_model=SpiderWithStats)
async def get_spider_stats(
    spider_id: str = Path(...),
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Return a spider's details plus aggregated task statistics.

    Computes run/success/failure counts and the average runtime (in
    seconds) over tasks that have both start and finish timestamps.

    404 if the spider does not exist; 403 if the caller is neither the
    owner nor an admin.
    """
    spider = await MongoDB.find_one("spiders", {"_id": spider_id})

    if not spider:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Spider not found")

    is_admin = current_user.role.value == "admin"
    if not is_admin and spider["user_id"] != current_user.id:
        raise HTTPException(status_code=403, detail="Not authorized to access this spider's stats")

    tasks = await MongoDB.find_many("tasks", {"spider_id": spider_id})

    run_count = len(tasks)
    success_count = len([t for t in tasks if t.get("status") == "completed"])
    failure_count = len([t for t in tasks if t.get("status") == "failed"])

    durations = []
    for task in tasks:
        start = task.get("started_at")
        end = task.get("finished_at")
        if not (start and end):
            continue
        # Timestamps may be stored as datetimes or ISO strings. A
        # malformed string raises ValueError, and subtracting a tz-aware
        # datetime (parsed from "...Z") from a naive stored datetime
        # raises TypeError — either would 500 the whole endpoint, so
        # skip the offending task instead.
        try:
            if isinstance(start, str):
                start = datetime.fromisoformat(start.replace("Z", "+00:00"))
            if isinstance(end, str):
                end = datetime.fromisoformat(end.replace("Z", "+00:00"))
            durations.append((end - start).total_seconds())
        except (ValueError, TypeError) as e:
            logger.warning(f"Skipping task with bad timestamps for spider {spider_id}: {e}")

    average_runtime = sum(durations) / len(durations) if durations else 0

    return SpiderWithStats(
        id=spider["_id"],
        name=spider["name"],
        description=spider.get("description"),
        language=spider["language"],
        path=spider.get("path"),  # legacy filesystem storage
        file_id=spider.get("file_id"),  # GridFS storage
        user_id=spider["user_id"],
        version=spider["version"],
        created_at=spider["created_at"],
        updated_at=spider["updated_at"],
        run_count=run_count,
        success_count=success_count,
        failure_count=failure_count,
        average_runtime=average_runtime
    )