# encoding: utf-8
# @File  : spider_runner.py
# @Author: shaoyun
# @Date  :  2025/05/11
import json
import os
import re
import subprocess
import zipfile
from datetime import datetime
from typing import Dict, Optional, Any, Tuple

from bson import ObjectId
from docker.errors import DockerException
from loguru import logger
from motor.motor_asyncio import AsyncIOMotorGridFSBucket

from app.core.config import settings
from app.db.mongo_manager import MongoDB
from app.utils.docker_client import DockerClient


class SpiderRunner:
    """Spider runner.

    Downloads packaged spiders (zip archives stored in GridFS), locates the
    entry script, and executes it either inside a Docker container (when
    ``settings.DOCKER_ENABLED`` is set) or as a local subprocess.  Results
    and execution logs are persisted to MongoDB.
    """

    # Hard limit for a single spider run, in seconds (container wait and
    # local subprocess both use this).
    _RUN_TIMEOUT = 300

    @staticmethod
    async def download_spider_file(spider_id: str, file_id: str) -> str:
        """Download a spider's zip package from GridFS and extract it locally.

        Args:
            spider_id: Spider identifier, used as the cache directory name.
            file_id: GridFS file id (hex string) of the zip archive.

        Returns:
            Path of the local directory containing the extracted package.

        Raises:
            Exception: any download/extraction error is logged, the partial
                zip file is removed, and the error re-raised.
        """
        # Local cache directory for this spider.
        cache_dir = os.path.join("worker_cache", "spiders", spider_id)
        os.makedirs(cache_dir, exist_ok=True)

        zip_path = os.path.join(cache_dir, f"{spider_id}.zip")

        # Cache hit: the zip only survives a fully successful download and
        # extraction (it is deleted on failure below), so its presence means
        # the cache directory is usable.
        if os.path.exists(zip_path):
            logger.info(f"Spider {spider_id} already cached")
            return cache_dir

        try:
            db = await MongoDB.get_database()
            fs = AsyncIOMotorGridFSBucket(db, bucket_name="files")

            # Stream the archive from GridFS to disk.
            with open(zip_path, 'wb') as output:
                await fs.download_to_stream(ObjectId(file_id), output)

            # Unpack next to the zip so entry-file lookup can scan it.
            with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                zip_ref.extractall(cache_dir)

            logger.info(f"Downloaded and extracted spider {spider_id}")
            return cache_dir

        except Exception as e:
            logger.error(f"Error downloading spider file: {e}")
            # Remove the partial zip so the cache check above cannot mistake
            # a failed download for a valid cache entry.
            if os.path.exists(zip_path):
                os.remove(zip_path)
            raise

    @staticmethod
    async def run_spider(
            task_id: str,
            spider_id: str,
            parameters: Optional[Dict[str, Any]] = None
    ) -> Tuple[bool, str]:
        """Run a spider and record the outcome in ``execution_logs``.

        Args:
            task_id: Task identifier used for log records and result files.
            spider_id: MongoDB ``_id`` of the spider document.
            parameters: Optional runtime parameters, handed to the spider
                via a ``params.json`` file.

        Returns:
            ``(success, message)`` — ``message`` holds the captured output
            on success or an error description on failure.
        """
        try:
            # NOTE(review): spiders are looked up with the raw string id;
            # confirm the "spiders" collection stores string _id values
            # (elsewhere GridFS ids are ObjectId).
            spider = await MongoDB.find_one("spiders", {"_id": spider_id})
            if not spider:
                logger.error(f"Spider {spider_id} not found")
                return False, "Spider not found"

            # Fail fast with a clear message when the package reference is
            # missing, instead of crashing inside ObjectId(None) later.
            file_id = spider.get("file_id")
            if not file_id:
                logger.error(f"Spider {spider_id} has no file_id")
                return False, "Spider package file_id missing"

            spider_dir = await SpiderRunner.download_spider_file(spider_id, file_id)

            language = spider.get("language", "")
            entry_file = await SpiderRunner.find_entry_file(spider_dir, language)
            if not entry_file:
                return False, "Entry file not found in spider package"

            # Docker isolation when enabled, plain subprocess otherwise.
            if settings.DOCKER_ENABLED:
                success, result = await SpiderRunner._run_with_docker(
                    task_id, spider_id, spider_dir, entry_file, language, parameters
                )
            else:
                success, result = await SpiderRunner._run_locally(
                    task_id, spider_id, spider_dir, entry_file, language, parameters
                )

            # Persist the execution outcome.
            execution_log = {
                "task_id": task_id,
                "spider_id": spider_id,
                "success": success,
                "message": result if not success else "Success",
                "log": result,
                "timestamp": datetime.utcnow()
            }
            await MongoDB.insert_one("execution_logs", execution_log)

            return success, result

        except Exception as e:
            logger.exception(f"Error running spider: {e}")
            # Record the failure so get_execution_log() can report it.
            execution_log = {
                "task_id": task_id,
                "spider_id": spider_id,
                "success": False,
                "message": str(e),
                "timestamp": datetime.utcnow()
            }
            await MongoDB.insert_one("execution_logs", execution_log)
            return False, str(e)

    @staticmethod
    async def find_entry_file(spider_dir: str, language: str) -> Optional[str]:
        """Locate the spider's entry script inside *spider_dir*.

        Conventional entry-file names in the package root take priority;
        otherwise the first file with a matching extension anywhere in the
        tree is used.  Returns ``None`` for unsupported languages or when
        no candidate exists.
        """
        lang = (language or "").lower()
        if lang == "python":
            candidates = ["main.py", "spider.py", "run.py", "__main__.py"]
            extensions = [".py"]
        elif lang in ("javascript", "node.js"):
            candidates = ["index.js", "main.js", "spider.js", "app.js"]
            extensions = [".js"]
        else:
            return None

        # Well-known entry-file names first.
        for candidate in candidates:
            path = os.path.join(spider_dir, candidate)
            if os.path.exists(path):
                return path

        # Fallback: first source file with a matching extension.
        for root, dirs, files in os.walk(spider_dir):
            # Skip hidden directories and __pycache__.
            dirs[:] = [d for d in dirs if not d.startswith('.') and d != '__pycache__']

            for file in files:
                if any(file.endswith(ext) for ext in extensions):
                    return os.path.join(root, file)

        return None

    @staticmethod
    async def _persist_results(task_id: str, spider_id: str, result_file: str) -> bool:
        """Store the spider's JSON result file in MongoDB, if it exists.

        Returns:
            True when a result file was found and persisted, False otherwise.
        """
        if not os.path.exists(result_file):
            return False

        with open(result_file, 'r') as f:
            results_data = json.load(f)

        result_doc = {
            "task_id": task_id,
            "spider_id": spider_id,
            "data": results_data,
            "created_at": datetime.utcnow()
        }
        await MongoDB.insert_one("results", result_doc)
        return True

    @staticmethod
    async def _run_with_docker(
            task_id: str,
            spider_id: str,
            spider_dir: str,
            entry_file: str,
            language: str,
            parameters: Optional[Dict[str, Any]] = None
    ) -> Tuple[bool, str]:
        """Execute a spider inside a Docker container.

        The spider directory is bind-mounted at ``/app``; the spider is
        expected to write its results to ``results/<task_id>.json``.
        """
        try:
            client = DockerClient.get_client()

            # Runtime parameters are passed via a JSON file in the package.
            params_file = os.path.join(spider_dir, 'params.json')
            with open(params_file, 'w') as f:
                json.dump(parameters or {}, f)

            # Result directory shared with the container through the mount.
            result_dir = os.path.join(spider_dir, 'results')
            os.makedirs(result_dir, exist_ok=True)
            result_file = os.path.join(result_dir, f"{task_id}.json")

            try:
                lang = (language or "").lower()
                if lang == 'python':
                    image = "python:3.9-slim"
                    requirements_path = os.path.join(spider_dir, 'requirements.txt')
                    if os.path.exists(requirements_path):
                        # Bake the dependencies into a per-spider image.
                        # NOTE(review): built images are tagged but never
                        # pruned, so they accumulate on the host over time.
                        dockerfile_content = f"""
FROM python:3.9-slim
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
CMD ["python", "{os.path.basename(entry_file)}", "--params", "params.json", "--output", "results/{task_id}.json"]
"""
                        dockerfile_path = os.path.join(spider_dir, 'Dockerfile')
                        with open(dockerfile_path, 'w') as f:
                            f.write(dockerfile_content)

                        image_tag = f"spider-{spider_id}:latest"
                        client.images.build(path=spider_dir, tag=image_tag)
                        image = image_tag
                        cmd = None  # run the image's baked-in CMD
                    else:
                        cmd = ["python", f"/app/{os.path.basename(entry_file)}",
                               "--params", "/app/params.json",
                               "--output", f"/app/results/{task_id}.json"]

                elif lang in ('javascript', 'node.js'):
                    image = "node:16-alpine"
                    cmd = ["node", f"/app/{os.path.basename(entry_file)}",
                           "--params", "/app/params.json",
                           "--output", f"/app/results/{task_id}.json"]
                else:
                    return False, f"Unsupported language: {language}"

                # Start the container detached so we can enforce a timeout.
                container = client.containers.run(
                    image=image,
                    command=cmd,
                    volumes={
                        spider_dir: {'bind': '/app', 'mode': 'rw'}
                    },
                    network=settings.DOCKER_NETWORK if hasattr(settings, 'DOCKER_NETWORK') else None,
                    detach=True,
                    remove=False,
                    environment={
                        "TASK_ID": task_id,
                        "SPIDER_ID": spider_id,
                        "PYTHONUNBUFFERED": "1",
                        "NODE_ENV": "production"
                    },
                    mem_limit="512m",
                    cpu_quota=50000  # cap CPU usage at 50% of one core
                )

                try:
                    # Wait for completion (up to _RUN_TIMEOUT seconds), then
                    # collect combined stdout/stderr.
                    exit_code = container.wait(timeout=SpiderRunner._RUN_TIMEOUT)['StatusCode']
                    logs = container.logs(stdout=True, stderr=True).decode('utf-8')
                finally:
                    # Always remove the container — including when wait()
                    # times out or raises — so failed runs do not leak
                    # containers on the host.  force=True also kills a
                    # still-running (timed-out) container.
                    try:
                        container.remove(force=True)
                    except DockerException as cleanup_error:
                        logger.warning(f"Failed to remove container: {cleanup_error}")

                if exit_code != 0:
                    logger.error(f"Container exited with code {exit_code}: {logs}")
                    return False, f"Container exited with error code {exit_code}: {logs[-500:]}"

                # Store results (if the spider produced any).
                if await SpiderRunner._persist_results(task_id, spider_id, result_file):
                    return True, logs
                return False, f"No result file generated. Logs: {logs}"

            finally:
                # Remove the parameters file regardless of outcome.
                if os.path.exists(params_file):
                    os.remove(params_file)

        except DockerException as e:
            logger.error(f"Docker error: {e}")
            return False, f"Docker error: {str(e)}"
        except Exception as e:
            logger.error(f"Unexpected error: {e}")
            return False, f"Unexpected error: {str(e)}"

    @staticmethod
    async def _run_locally(
            task_id: str,
            spider_id: str,
            spider_dir: str,
            entry_file: str,
            language: str,
            parameters: Optional[Dict[str, Any]] = None
    ) -> Tuple[bool, str]:
        """Execute a spider as a local subprocess (non-Docker environments)."""
        try:
            # Runtime parameters are passed via a JSON file in the package.
            params_file = os.path.join(spider_dir, 'params.json')
            with open(params_file, 'w') as f:
                json.dump(parameters or {}, f)

            result_dir = os.path.join(spider_dir, 'results')
            os.makedirs(result_dir, exist_ok=True)
            result_file = os.path.join(result_dir, f"{task_id}.json")

            try:
                # Build the interpreter command for the language.
                lang = (language or "").lower()
                if lang == 'python':
                    cmd = ["python", entry_file]
                elif lang in ('javascript', 'node.js'):
                    cmd = ["node", entry_file]
                else:
                    return False, f"Unsupported language: {language}"

                cmd.extend([
                    "--params", params_file,
                    "--output", result_file
                ])

                # Expose task context to the spider via environment variables.
                env = os.environ.copy()
                env["TASK_ID"] = task_id
                env["SPIDER_ID"] = spider_id
                env["PYTHONUNBUFFERED"] = "1"

                process = subprocess.Popen(
                    cmd,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                    env=env,
                    cwd=spider_dir
                )

                # Wait for completion (up to _RUN_TIMEOUT seconds).
                try:
                    stdout, _ = process.communicate(timeout=SpiderRunner._RUN_TIMEOUT)
                    output = stdout.decode('utf-8', errors='replace')
                except subprocess.TimeoutExpired:
                    process.kill()
                    # Reap the killed process (documented pattern for
                    # communicate + kill) to avoid a zombie and release pipes.
                    process.communicate()
                    return False, "Spider execution timed out (5 minutes)"

                if process.returncode != 0:
                    logger.error(f"Spider execution failed with code {process.returncode}: {output}")
                    return False, f"Exit code {process.returncode}: {output[-500:]}"

                # Store results (if the spider produced any).
                if await SpiderRunner._persist_results(task_id, spider_id, result_file):
                    return True, output
                return False, f"No result file generated. Output: {output}"

            finally:
                # Remove the parameters file regardless of outcome.
                if os.path.exists(params_file):
                    os.remove(params_file)

        except Exception as e:
            logger.exception(f"Error running spider locally: {e}")
            return False, str(e)

    @staticmethod
    async def get_spider_results(task_id: str) -> Dict[str, Any]:
        """Return the stored result document for *task_id*.

        Returns an empty placeholder when no results exist; the MongoDB
        ``_id`` field is stripped so the document is JSON-serializable.
        """
        result = await MongoDB.find_one("results", {"task_id": task_id})
        if not result:
            return {"data": [], "created_at": None}

        result.pop("_id", None)
        return result

    @staticmethod
    async def get_execution_log(task_id: str) -> Dict[str, Any]:
        """Return the execution-log document for *task_id*.

        Returns a "Log not found" placeholder when no log exists; the
        MongoDB ``_id`` field is stripped.
        """
        log = await MongoDB.find_one("execution_logs", {"task_id": task_id})
        if not log:
            return {
                "success": False,
                "message": "Log not found",
                "log": "",
                "timestamp": None
            }

        log.pop("_id", None)
        return log

    @staticmethod
    async def _extract_python_dependencies(code: str) -> list:
        """Extract third-party package names from Python source code.

        Handles ``import a, b``, ``import a as b`` and ``from a import b``
        forms, filters out a small set of common stdlib modules, and maps
        import names to PyPI package names where they differ
        (e.g. ``bs4`` -> ``beautifulsoup4``).
        """
        # Common stdlib modules that must not be reported as dependencies.
        stdlib_modules = {'os', 'sys', 'json', 'time', 'datetime',
                          're', 'math', 'random', 'collections',
                          'itertools', 'functools', 'typing'}

        dependencies = set()
        for line in code.split('\n'):
            from_match = re.match(r'^\s*from\s+([\w.]+)', line)
            if from_match:
                modules = [from_match.group(1)]
            else:
                import_match = re.match(r'^\s*import\s+(.+)', line)
                if not import_match:
                    continue
                # "import a, b as c" -> ["a", "b"]; strip aliases and commas
                # so "import os, sys" no longer yields the bogus name "os,".
                modules = [part.strip().split(' as ')[0]
                           for part in import_match.group(1).split(',')]

            for module in modules:
                # Top-level package name only (e.g. "lxml.etree" -> "lxml").
                base_module = module.split('.')[0]
                if base_module and base_module not in stdlib_modules:
                    dependencies.add(base_module)

        # Map import names to PyPI package names where they differ.
        dependency_map = {
            'requests': 'requests',
            'bs4': 'beautifulsoup4',
            'scrapy': 'scrapy',
            'selenium': 'selenium',
            'pandas': 'pandas',
            'numpy': 'numpy',
            'lxml': 'lxml'
        }

        return [dependency_map.get(dep, dep) for dep in dependencies]

    @staticmethod
    async def _extract_node_dependencies(code: str) -> dict:
        """Extract npm package names from Node.js source code.

        Detects CommonJS ``require(...)`` and ES6 ``import ... from`` forms;
        relative and absolute paths are skipped.  Every package is pinned to
        ``"latest"``.
        """
        dependencies = {}

        # CommonJS: const/let/var x = require('pkg')
        require_pattern = r'(?:const|let|var)\s+\w+\s*=\s*require\s*\(\s*[\'"](\S+)[\'"]\s*\)'
        for match in re.finditer(require_pattern, code):
            module = match.group(1)
            if not module.startswith('.') and not module.startswith('/'):
                dependencies[module] = "latest"

        # ES6: import x from 'pkg'
        import_pattern = r'import\s+.*?\s+from\s+[\'"](\S+)[\'"]'
        for match in re.finditer(import_pattern, code):
            module = match.group(1)
            if not module.startswith('.') and not module.startswith('/'):
                dependencies[module] = "latest"

        return dependencies

    @staticmethod
    async def cleanup_old_cache():
        """Delete cached spider directories unused for more than 7 days."""
        import shutil  # local import kept function-scoped, hoisted out of the loop

        cache_dir = os.path.join("worker_cache", "spiders")
        if not os.path.exists(cache_dir):
            return

        current_time = datetime.now()
        for spider_id in os.listdir(cache_dir):
            spider_dir = os.path.join(cache_dir, spider_id)
            if os.path.isdir(spider_dir):
                # Use the directory mtime as a proxy for last use.
                mtime = datetime.fromtimestamp(os.path.getmtime(spider_dir))
                if (current_time - mtime).days > 7:
                    try:
                        shutil.rmtree(spider_dir)
                        logger.info(f"Cleaned up old cache for spider {spider_id}")
                    except Exception as e:
                        logger.error(f"Error cleaning cache for spider {spider_id}: {e}")