import logging
import os
import shlex
import subprocess
import sys
from pathlib import Path
from subprocess import PIPE, Popen
from typing import Dict, Union

from flask import Response, stream_with_context
from pymongo import MongoClient

# 配置日志
logger = logging.getLogger(__name__)

class CrawlerService:
    """Service wrapper around crawler tasks and crawl-result statistics."""

    # Platform codes accepted by spider/main.py's --platform flag.
    SUPPORTED_PLATFORMS = ['dy', 'xhs', 'bili', 'ks', 'wb', 'tieba', 'zhihu']

    # Shared MongoDB connection settings for the statistics helpers.
    _MONGO_URI = 'mongodb://localhost:27017/'
    _DB_NAME = 'crawler'

    @classmethod
    def start_crawler(
        cls,
        platform: str,
        keywords: str,
        login_type: str = "cookie",
        crawl_type: str = "search",
        save_option: str = "json"
    ) -> Response:
        """
        Launch a crawler run and stream its console output as server-sent events.

        Args:
            platform: Target platform code; must be one of SUPPORTED_PLATFORMS.
            keywords: Search keywords forwarded to the spider as --keywords.
            login_type: Login mechanism forwarded as --lt.
            crawl_type: Crawl mode forwarded as --type.
            save_option: Storage backend forwarded as --save_data_option.

        Returns:
            A streaming ``text/event-stream`` Response on success, or a plain
            Response with status 400 (validation failure) / 500 (startup error).
        """
        try:
            # Reject unsupported platforms before spawning a shell: this both
            # enforces SUPPORTED_PLATFORMS and narrows the injection surface.
            if platform not in cls.SUPPORTED_PLATFORMS:
                error_msg = f"不支持的平台: {platform}"
                logger.error(error_msg)
                return Response(error_msg, status=400)

            # Locate the spider project relative to this file.
            backend_root = Path(__file__).parents[3]
            spider_root = (backend_root / "spider").resolve()

            if not spider_root.exists():
                error_msg = f"无效的工作目录: {spider_root}"
                logger.error(error_msg)
                return Response(error_msg, status=400)

            if not (spider_root / "main.py").exists():
                error_msg = f"未找到 main.py 文件: {spider_root / 'main.py'}"
                logger.error(error_msg)
                return Response(error_msg, status=400)

            # Build the full shell command. conda activation requires
            # shell=True, so every interpolated value is shell-quoted with
            # shlex.quote to prevent command injection via user input.
            activate_cmd = "conda activate spider"
            cd_cmd = f"cd {shlex.quote(str(spider_root))}"
            crawl_cmd = (
                f"python main.py --platform {shlex.quote(platform)} "
                f"--lt {shlex.quote(login_type)} "
                f"--type {shlex.quote(crawl_type)} "
                f"--keywords {shlex.quote(keywords)} "
                f"--save_data_option {shlex.quote(save_option)}"
            )
            full_cmd = f"{activate_cmd} && {cd_cmd} && {crawl_cmd}"

            logger.debug(f"Command: {full_cmd}")
            logger.debug(f"Working Directory: {spider_root}")

            # Spawn the crawler. stderr is merged into stdout so a single
            # stream carries everything; PYTHONUNBUFFERED keeps it real-time.
            process = subprocess.Popen(
                full_cmd,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
                bufsize=1,  # line-buffered so readline() yields promptly
                env={'PYTHONUNBUFFERED': '1', **os.environ}
            )

            def generate():
                """Yield crawler output lines as SSE ``data:`` frames."""
                try:
                    while True:
                        output = process.stdout.readline()
                        # Empty read + finished process => stream fully drained.
                        if output == '' and process.poll() is not None:
                            break
                        if output:
                            # Mirror to the server console for operator visibility.
                            sys.stdout.write(output)
                            sys.stdout.flush()
                            yield f"data: {output.strip()}\n\n"

                    return_code = process.wait()
                    if return_code != 0:
                        error_msg = f"爬虫执行失败，返回码: {return_code}"
                        logger.error(error_msg)
                        yield f"data: [ERROR] {error_msg}\n\n"
                    else:
                        yield f"data: Process finished with return code {return_code}\n\n"
                finally:
                    # Release the pipe even if the client disconnects mid-stream.
                    process.stdout.close()

            return Response(stream_with_context(generate()), mimetype='text/event-stream')
        except Exception as e:
            error_msg = f"爬虫启动失败: {str(e)}"
            logger.exception(error_msg)
            return Response(error_msg, status=500)

    @classmethod
    def get_total_comments(cls, keywords: str) -> Dict[str, Union[str, int]]:
        """
        Count crawled comments across all collections matching the keywords.

        Args:
            keywords: Substring matched against collection names; every
                matching collection's document count is summed.

        Returns:
            ``{"status": "success", "totalComments": <int>}`` on success, or
            ``{"status": "error", "message": <str>}`` on failure.
        """
        try:
            client = MongoClient(cls._MONGO_URI)
            try:
                db = client[cls._DB_NAME]
                # Sum document counts over every collection whose name
                # contains the keyword.
                total_comments = sum(
                    db[name].count_documents({})
                    for name in db.list_collection_names()
                    if keywords in name
                )
                return {
                    "status": "success",
                    "totalComments": total_comments
                }
            finally:
                # MongoClient holds sockets; release them explicitly instead
                # of leaking a connection pool per request.
                client.close()

        except Exception as e:
            error_msg = f"获取评论总数失败: {str(e)}"
            logger.exception(error_msg)
            return {"status": "error", "message": error_msg}

    @classmethod
    def get_DB_basement(cls) -> Dict[str, Union[str, dict]]:
        """
        Report database overview: collection count plus per-collection
        name, size in bytes, and document count.

        Returns:
            ``{"status": "success", "dbInfo": {...}}`` on success, or
            ``{"status": "error", "message": <str>}`` on failure.
        """
        try:
            client = MongoClient(cls._MONGO_URI)
            try:
                db = client[cls._DB_NAME]
                collections = db.list_collection_names()

                collection_info = []
                for collection_name in collections:
                    num_documents = db[collection_name].count_documents({})
                    # collStats reports storage statistics; "size" is the
                    # uncompressed data size in bytes.
                    stats = db.command("collStats", collection_name)
                    collection_info.append({
                        "collectionName": collection_name,
                        "numDocuments": num_documents,
                        "size": stats.get("size", 0)
                    })

                return {
                    "status": "success",
                    "dbInfo": {
                        "databaseName": cls._DB_NAME,
                        "numCollections": len(collections),
                        "collections": collection_info
                    }
                }
            finally:
                # Always close the client, even when collStats raises.
                client.close()

        except Exception as e:
            error_msg = f"获取数据库基础信息失败: {str(e)}"
            logger.exception(error_msg)
            return {"status": "error", "message": error_msg}
        




# import subprocess
# from pathlib import Path
# import logging
# from typing import Dict, Union
# from pymongo import MongoClient
# from flask import Response, stream_with_context
# from subprocess import Popen, PIPE
# import sys
# import os

# # 配置日志
# logger = logging.getLogger(__name__)

# class CrawlerService:
#     """爬虫任务服务封装"""

#     SUPPORTED_PLATFORMS = ['dy', 'xhs', 'bili', 'ks', 'wb', 'tieba', 'zhihu']

#     @classmethod
#     def start_crawler(
#         cls,
#         platform: str,
#         keywords: str,
#         login_type: str = "cookie",
#         crawl_type: str = "search",
#         save_option: str = "json"
#     ) -> Response:
#         """
#         启动爬虫任务的核心逻辑
#         """
#         try:
#             # 硬编码指定爬虫脚本路径
#             spider_root = "/www/wwwroot/public_sentiment_monitoring/spider"

#             # 检查路径是否存在
#             if not Path(spider_root).exists():
#                 error_msg = f"无效的工作目录: {spider_root}"
#                 logger.error(error_msg)
#                 return Response(error_msg, status=400)

#             if not Path(spider_root, "main.py").exists():
#                 error_msg = f"未找到 main.py 文件: {Path(spider_root, 'main.py')}"
#                 logger.error(error_msg)
#                 return Response(error_msg, status=400)

#             # 构建完整的命令
#             python_cmd = sys.executable  # 使用当前 Python 解释器路径
#             crawl_cmd = (
#                 f"{python_cmd} {spider_root}/main.py --platform {platform} "
#                 f"--lt {login_type} "
#                 f"--type {crawl_type} "
#                 f"--keywords {keywords} "
#                 f"--save_data_option {save_option}"
#             )

#             logger.debug(f"Command: {crawl_cmd}")
#             logger.debug(f"Working Directory: {spider_root}")

#             # 执行命令并捕获实时输出
#             process = subprocess.Popen(
#                 crawl_cmd,
#                 shell=True,
#                 stdout=subprocess.PIPE,
#                 stderr=subprocess.STDOUT,  # 将 stderr 重定向到 stdout
#                 text=True,
#                 bufsize=1,
#                 env={'PYTHONUNBUFFERED': '1', **os.environ},
#                 cwd=spider_root  # 设置工作目录
#             )

#             def generate():
#                 while True:
#                     output = process.stdout.readline()
#                     if output == '' and process.poll() is not None:
#                         break
#                     if output:
#                         sys.stdout.write(output)
#                         sys.stdout.flush()
#                         yield f"data: {output.strip()}\n\n"
                
#                 return_code = process.wait()
#                 if return_code != 0:
#                     error_msg = f"爬虫执行失败，返回码: {return_code}"
#                     logger.error(error_msg)
#                     yield f"data: [ERROR] {error_msg}\n\n"
#                 else:
#                     yield f"data: Process finished with return code {return_code}\n\n"

#             return Response(stream_with_context(generate()), mimetype='text/event-stream')
#         except Exception as e:
#             error_msg = f"爬虫启动失败: {str(e)}"
#             logger.exception(error_msg)
#             return Response(error_msg, status=500)

#     @classmethod
#     def get_total_comments(cls, keywords: str) -> Dict[str, Union[str, int]]:
#         """
#         获取已爬取的评论总数
        
#         Args:
#             keywords: 搜索关键词

#         Returns:
#             包含状态和评论总数的字典
#         """
#         try:
#             # MongoDB连接配置
#             client = MongoClient('mongodb://localhost:27017/')
#             db = client['crawler'] 

#             # 初始化评论总数
#             total_comments = 0

#             # 遍历数据库中的所有集合（表）
#             for collection_name in db.list_collection_names():
#                 if keywords in collection_name:  # 如果集合名称包含关键词
#                     collection = db[collection_name]
#                     total_comments += collection.count_documents({})  # 累加记录数

#             return {
#                 "status": "success",
#                 "totalComments": total_comments
#             }

#         except Exception as e:
#             error_msg = f"获取评论总数失败: {str(e)}"
#             logger.exception(error_msg)
#             return {"status": "error", "message": error_msg}
        

#     @classmethod
#     def get_DB_basement(cls) -> Dict[str, Union[str, dict]]:
#         """
#         获取数据库基础信息，包括表的数量、表的名称、大小和记录数目

#         Returns:
#             包含状态和数据库基础信息的字典
#         """
#         try:
#             # 连接 MongoDB 数据库
#             client = MongoClient('mongodb://localhost:27017/')
#             db = client['crawler']

#             # 获取数据库中的所有集合（表）名称
#             collections = db.list_collection_names()
#             num_collections = len(collections)

#             # 初始化存储表信息的列表
#             collection_info = []

#             # 遍历每个集合，获取其名称、大小和记录数目
#             for collection_name in collections:
#                 collection = db[collection_name]
#                 num_documents = collection.count_documents({})

#                 # 使用 db.command 获取集合的统计信息
#                 stats = db.command("collStats", collection_name)
#                 collection_size = stats.get("size", 0)  # 获取集合的大小（以字节为单位）

#                 collection_info.append({
#                     "collectionName": collection_name,
#                     "numDocuments": num_documents,
#                     "size": collection_size
#                 })

#             return {
#                 "status": "success",
#                 "dbInfo": {
#                     "databaseName": "crawler",
#                     "numCollections": num_collections,
#                     "collections": collection_info
#                 }
#             }

#         except Exception as e:
#             error_msg = f"获取数据库基础信息失败: {str(e)}"
#             logger.exception(error_msg)
#             return {"status": "error", "message": error_msg}