#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Xiaohongshu crawler bridge module.

Bridges the Xiaohongshu data-management system and the
xhs_collection-master crawler tool: launches the crawler's Gradio app
as a subprocess and exposes helpers to query and control it.
"""

import os
import sys
import importlib.util
import logging
import subprocess
import time
import threading
import requests
from pathlib import Path

# Logging: stream to the console and to crawler_bridge.log next to this file.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'crawler_bridge.log'))
    ]
)
logger = logging.getLogger('xhs_crawler_bridge')

# Module-level state shared by the launch/monitor/status/stop helpers.
CRAWLER_PORT = 7860  # Gradio's default port
CRAWLER_HOST = "127.0.0.1"
CRAWLER_URL = f"http://{CRAWLER_HOST}:{CRAWLER_PORT}"
CRAWLER_PROCESS = None   # subprocess.Popen handle of the crawler, once launched
CRAWLER_RUNNING = False  # True while the crawler process is believed alive
CRAWLER_ERROR = None     # last error message, or None

# Paths used to locate the crawler entry script.
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
PARENT_DIR = os.path.dirname(CURRENT_DIR)
# The crawler module lives in the xhs_crawler directory under the project root.
CRAWLER_MODULE_PATH = os.path.join(PARENT_DIR, "xhs_crawler", "xhs_crawler_gradio.py")

logger.info(f"初始化爬虫桥接模块，当前路径: {CURRENT_DIR}")
logger.info(f"系统路径: {sys.path}")
logger.info(f"爬虫模块路径: {CRAWLER_MODULE_PATH}")

# Fail soft when the crawler entry script is missing: record the error so
# get_crawler_status() can surface it instead of crashing at import time.
if not os.path.exists(CRAWLER_MODULE_PATH):
    error_msg = f"找不到爬虫模块: {CRAWLER_MODULE_PATH}"
    logger.error(error_msg)
    CRAWLER_ERROR = error_msg
else:
    logger.info(f"找到爬虫模块文件: {CRAWLER_MODULE_PATH}")

def launch_crawler():
    """
    Launch the crawler's Gradio app in a subprocess.

    Returns:
        str: the crawler app's URL. The URL is returned even when the HTTP
             probe fails (CRAWLER_ERROR is set so the frontend can react).

    Raises:
        Exception: if the subprocess exits immediately or launching fails.
    """
    global CRAWLER_PROCESS, CRAWLER_RUNNING, CRAWLER_ERROR

    # Already running: reuse the existing process.
    if CRAWLER_RUNNING and CRAWLER_PROCESS and CRAWLER_PROCESS.poll() is None:
        logger.info("爬虫已经在运行，返回URL")
        return CRAWLER_URL

    # Reset any stale error from a previous attempt.
    CRAWLER_ERROR = None

    try:
        # Make the crawler's own directory importable for its relative imports.
        crawler_dir = os.path.dirname(CRAWLER_MODULE_PATH)
        if crawler_dir not in sys.path:
            sys.path.insert(0, crawler_dir)

        logger.info(f"启动爬虫进程: {CRAWLER_MODULE_PATH}")

        # Run the crawler module with the same interpreter as this process.
        # NOTE(review): stdout/stderr PIPEs are never drained while the child
        # runs; a chatty child can fill the pipe buffer and block. Kept as-is
        # because the failure paths below read stderr via communicate().
        python_executable = sys.executable
        CRAWLER_PROCESS = subprocess.Popen(
            [python_executable, CRAWLER_MODULE_PATH],
            cwd=crawler_dir,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True
        )

        # Give the Gradio app a moment to boot before probing it.
        logger.info("等待爬虫启动...")
        time.sleep(3)

        # If the process already exited, launching failed — report stderr.
        if CRAWLER_PROCESS.poll() is not None:
            stdout, stderr = CRAWLER_PROCESS.communicate()
            error_msg = f"爬虫进程启动失败: {stderr}"
            logger.error(error_msg)
            CRAWLER_ERROR = error_msg
            CRAWLER_RUNNING = False
            raise Exception(error_msg)

        # BUG FIX: mark the crawler as running *before* starting the monitor
        # thread. The monitor loop checks CRAWLER_RUNNING on entry, so the
        # original code (which set it only after the HTTP probe succeeded)
        # made the monitor thread exit immediately without watching anything.
        CRAWLER_RUNNING = True

        monitor_thread = threading.Thread(target=monitor_crawler_process)
        monitor_thread.daemon = True
        monitor_thread.start()

        # Probe the HTTP endpoint until it answers with 200 or we give up.
        # BUG FIX: the original only incremented the retry counter inside the
        # except branch, so a reachable server returning a non-200 status
        # looped forever. Every failed attempt now counts.
        max_retries = 5
        for attempt in range(1, max_retries + 1):
            try:
                response = requests.get(CRAWLER_URL, timeout=2)
                if response.status_code == 200:
                    logger.info(f"爬虫服务已启动，可通过 {CRAWLER_URL} 访问")
                    return CRAWLER_URL
            except requests.exceptions.RequestException:
                pass
            logger.info(f"等待爬虫服务响应... ({attempt}/{max_retries})")
            time.sleep(2)

        # HTTP endpoint never answered; the process may still be starting.
        # Record the error but hand back the URL so the frontend can decide.
        error_msg = f"爬虫服务启动后无法访问: {CRAWLER_URL}"
        logger.error(error_msg)
        CRAWLER_ERROR = error_msg
        return CRAWLER_URL

    except Exception as e:
        error_msg = f"启动爬虫时发生错误: {str(e)}"
        logger.error(error_msg)
        CRAWLER_ERROR = error_msg
        CRAWLER_RUNNING = False
        raise

def monitor_crawler_process():
    """Watch the crawler subprocess and flip the running flag when it dies.

    Runs as a daemon thread: every 5 seconds it polls the process; if the
    process has exited, the exit is logged, CRAWLER_ERROR is recorded, and
    CRAWLER_RUNNING is cleared before the loop ends.
    """
    global CRAWLER_RUNNING, CRAWLER_ERROR

    logger.info("启动爬虫进程监控线程")

    while True:
        # Stop watching once the process handle is gone or we were told to stop.
        if not (CRAWLER_PROCESS and CRAWLER_RUNNING):
            break

        if CRAWLER_PROCESS.poll() is not None:
            # Process exited on its own — capture stderr for diagnostics.
            stdout, stderr = CRAWLER_PROCESS.communicate()
            error_msg = f"爬虫进程已退出: {stderr}"
            logger.error(error_msg)
            CRAWLER_ERROR = error_msg
            CRAWLER_RUNNING = False
            break

        time.sleep(5)  # poll interval

    logger.info("爬虫进程监控线程结束")

def get_crawler_url():
    """Return the crawler app's base URL (does not imply the service is up)."""
    return CRAWLER_URL

def get_crawler_status():
    """Report the crawler's current state.

    Returns:
        dict: {"running": bool, "url": str, "error": str | None}. When the
        process looks alive, the service is probed over HTTP; an unresponsive
        service records an error message in CRAWLER_ERROR.
    """
    global CRAWLER_RUNNING, CRAWLER_ERROR

    process_alive = bool(
        CRAWLER_RUNNING and CRAWLER_PROCESS and CRAWLER_PROCESS.poll() is None
    )

    if process_alive:
        try:
            probe = requests.get(f"{CRAWLER_URL}/", timeout=1)
        except requests.exceptions.RequestException:
            # Process is up but the HTTP endpoint is not answering.
            CRAWLER_ERROR = "爬虫服务无响应"
        else:
            if probe.status_code == 200:
                return {
                    "running": True,
                    "url": CRAWLER_URL,
                    "error": None
                }

    # Fall through with whatever state the globals currently hold.
    return {
        "running": CRAWLER_RUNNING,
        "url": CRAWLER_URL,
        "error": CRAWLER_ERROR
    }

def stop_crawler():
    """Stop the crawler subprocess if one was launched.

    Sends SIGTERM first, waits up to 5 seconds for a graceful exit, then
    escalates to SIGKILL.

    Returns:
        bool: True if a process existed and was stopped, False otherwise.
    """
    global CRAWLER_PROCESS, CRAWLER_RUNNING

    if CRAWLER_PROCESS:
        logger.info("停止爬虫进程")
        CRAWLER_PROCESS.terminate()
        try:
            CRAWLER_PROCESS.wait(timeout=5)
        except subprocess.TimeoutExpired:
            logger.warning("爬虫进程未在5秒内关闭，强制终止")
            CRAWLER_PROCESS.kill()
            # BUG FIX: reap the killed child — without this wait() the dead
            # process lingers as a zombie on POSIX (subprocess docs require
            # waiting after kill()).
            CRAWLER_PROCESS.wait()

        CRAWLER_RUNNING = False
        return True

    return False

# Manual smoke test: launch the crawler, then print its status every 10s
# until the user interrupts or an error occurs.
if __name__ == "__main__":
    try:
        url = launch_crawler()
        print(f"爬虫已启动: {url}")

        while True:
            status = get_crawler_status()
            print(f"爬虫状态: {status}")
            time.sleep(10)
    except KeyboardInterrupt:
        # Ctrl-C: shut the crawler down cleanly before exiting.
        print("程序被用户中断")
        stop_crawler()
    except Exception as e:
        print(f"发生错误: {str(e)}")
        stop_crawler()