import asyncio
import locale
import logging
import os
import shlex
import subprocess
import sys
from pathlib import Path
from typing import Dict, Union

from fastapi.responses import JSONResponse, StreamingResponse
from pymongo import MongoClient

# 配置日志
logger = logging.getLogger(__name__)


class CrawlerService:
    """Crawler task service wrapper (runs the spider inside a conda env).

    ``start_crawler`` launches ``spider/main.py`` as a subprocess and streams
    its combined stdout/stderr back to the caller as Server-Sent Events (SSE).
    The remaining helpers report statistics from the ``crawler`` MongoDB
    database that the spider writes into.
    """

    # Platform codes accepted by the spider's --platform argument.
    SUPPORTED_PLATFORMS = ['dy', 'xhs', 'bili', 'ks', 'wb', 'tieba', 'zhihu']

    # Connection settings shared by the MongoDB statistics helpers.
    _MONGO_URI = 'mongodb://localhost:27017/'
    _MONGO_DB = 'crawler'

    @staticmethod
    def _error_stream(message: str, status_code: int = 500) -> StreamingResponse:
        """Build a one-shot SSE response carrying a single error event."""
        return StreamingResponse(
            iter([f"data: [ERROR] {message}\n\n".encode()]),
            status_code=status_code,
            media_type='text/event-stream'
        )

    @classmethod
    async def start_crawler(
            cls,
            platform: str,
            keywords: str,
            login_type: str = "cookie",
            crawl_type: str = "search",
            save_option: str = "json"
    ) -> StreamingResponse:
        """Start a crawl and stream the spider's output as SSE.

        Args:
            platform: Target platform code; must be one of SUPPORTED_PLATFORMS.
            keywords: Search keywords forwarded to the spider (may contain spaces).
            login_type: Spider login mode (``--lt``), e.g. "cookie".
            crawl_type: Crawl mode (``--type``), e.g. "search".
            save_option: Spider persistence backend (``--save_data_option``).

        Returns:
            A ``text/event-stream`` StreamingResponse. Failures are reported
            as a single ``data: [ERROR] ...`` event with a non-2xx status.
        """
        try:
            # Reject unknown platforms up front instead of letting the
            # subprocess die with an opaque argparse error.
            if platform not in cls.SUPPORTED_PLATFORMS:
                error_msg = f"不支持的平台: {platform}"
                logger.error(error_msg)
                return cls._error_stream(error_msg, status_code=400)

            # Locate the spider project relative to this file's package.
            backend_root = Path(__file__).parents[3]
            spider_root = backend_root / "spider"

            if not (spider_root / "main.py").exists():
                error_msg = f"未找到 main.py 文件: {spider_root}"
                logger.error(error_msg)
                # BUG FIX: the original returned StreamingResponse(...) with a
                # literal Ellipsis, which crashed when the response was sent.
                return cls._error_stream(error_msg)

            # shlex.quote() guards against shell injection from user-supplied
            # values (keywords especially).
            # NOTE(review): `conda activate` needs the conda shell hook in a
            # non-interactive shell — confirm in deployment, or consider
            # `conda run -n spider ...` instead.
            command = (
                f"conda activate spider && "
                f"cd {shlex.quote(str(spider_root))} && "
                f"python -u main.py "  # -u forces unbuffered output
                f"--platform {shlex.quote(platform)} "
                f"--lt {shlex.quote(login_type)} "
                f"--type {shlex.quote(crawl_type)} "
                f"--keywords {shlex.quote(keywords)} "
                f"--save_data_option {shlex.quote(save_option)}"
            )

            # Force unbuffered UTF-8 output so SSE lines arrive promptly and
            # decode cleanly.
            env = os.environ.copy()
            env.update({
                'PYTHONUNBUFFERED': '1',
                'PYTHONIOENCODING': 'utf-8',
                'LANG': 'zh_CN.UTF-8',
                'LC_ALL': 'zh_CN.UTF-8',
                'CONDA_DEFAULT_ENV': 'spider',  # declare the env explicitly
                'CONDA_PREFIX': os.environ.get('CONDA_PREFIX', '')  # inherit current prefix
            })

            # create_subprocess_shell always runs the command through a shell;
            # the original's extra shell=True kwarg was redundant and removed.
            process = await asyncio.create_subprocess_shell(
                command,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.STDOUT,  # merge stderr into the stream
                env=env,
                cwd=str(spider_root),
            )

            async def generate():
                """Yield the subprocess's output line-by-line as SSE events."""
                try:
                    while True:
                        line = await process.stdout.readline()
                        if not line:  # EOF: process closed its stdout
                            break
                        # Replace undecodable bytes rather than dropping the line.
                        output = line.decode('utf-8', errors='replace').strip()
                        yield f"data: {output}\n\n"

                    # Surface a non-zero exit status to the client.
                    return_code = await process.wait()
                    if return_code != 0:
                        yield f"data: [ERROR] Process exited with code {return_code}\n\n"
                except asyncio.CancelledError:
                    # Client disconnected: stop the spider gracefully.
                    logger.warning("客户端断开连接，终止进程")
                    process.terminate()
                    await process.wait()
                finally:
                    # Last-resort cleanup so no orphan process survives.
                    if process.returncode is None:
                        process.kill()
                        await process.wait()

            return StreamingResponse(
                generate(),
                media_type='text/event-stream',
                headers={
                    'Cache-Control': 'no-cache',  # SSE must not be cached
                    'X-Accel-Buffering': 'no'  # disable nginx proxy buffering
                }
            )

        except Exception as e:
            error_msg = f"爬虫启动失败: {str(e)}"
            logger.exception(error_msg)
            return cls._error_stream(error_msg)

    @classmethod
    async def get_total_comments(cls, keywords: str) -> Dict[str, Union[str, int]]:
        """Count crawled comments across collections whose name matches *keywords*.

        Args:
            keywords: Substring matched against collection names.

        Returns:
            ``{"status": "success", "totalComments": int}`` on success,
            ``{"status": "error", "message": str}`` on failure.
        """
        try:
            # BUG FIX: close the client when done — the original leaked a
            # connection pool on every call.
            with MongoClient(cls._MONGO_URI) as client:
                db = client[cls._MONGO_DB]
                # Sum document counts over every collection whose name
                # contains the keyword.
                total_comments = sum(
                    db[name].count_documents({})
                    for name in db.list_collection_names()
                    if keywords in name
                )

            return {
                "status": "success",
                "totalComments": total_comments
            }

        except Exception as e:
            error_msg = f"获取评论总数失败: {str(e)}"
            logger.exception(error_msg)
            return {"status": "error", "message": error_msg}

    @classmethod
    async def get_DB_basement(cls) -> Dict[str, Union[str, dict]]:
        """Report a database overview: collection count, names, sizes, doc counts.

        Returns:
            ``{"status": "success", "dbInfo": {...}}`` on success,
            ``{"status": "error", "message": str}`` on failure.
        """
        try:
            # BUG FIX: close the client when done (see get_total_comments).
            with MongoClient(cls._MONGO_URI) as client:
                db = client[cls._MONGO_DB]
                collections = db.list_collection_names()

                collection_info = []
                for collection_name in collections:
                    # collStats reports the collection's data size in bytes.
                    stats = db.command("collStats", collection_name)
                    collection_info.append({
                        "collectionName": collection_name,
                        "numDocuments": db[collection_name].count_documents({}),
                        "size": stats.get("size", 0)
                    })

            return {
                "status": "success",
                "dbInfo": {
                    "databaseName": cls._MONGO_DB,
                    "numCollections": len(collections),
                    "collections": collection_info
                }
            }

        except Exception as e:
            error_msg = f"获取数据库基础信息失败: {str(e)}"
            logger.exception(error_msg)
            return {"status": "error", "message": error_msg}