import sys
import os
import threading
import time
from datetime import datetime

from web_app import get_db_connection

# Make visual_spider.py (located next to this file) importable as a module
sys.path.append(os.path.dirname(__file__))


def run_visual_spider(keyword, city, pages, user_id, task_id):
    """Run the visual_spider crawler in-process; fall back to a subprocess.

    Attempts to import ``visual_spider`` and invoke its in-process entry
    point. On any failure (import error, missing API, crawl error) it falls
    back to :func:`run_spider_via_subprocess`.

    :param keyword: search keyword to crawl.
    :param city: target city name.
    :param pages: number of result pages to fetch.
    :param user_id: owner of the crawled records.
    :param task_id: id of the crawl task row (used by the subprocess fallback).
    :return: number of crawled records (int); 0 if both strategies fail.
    """
    try:
        # Import lazily and import ONLY what we call, so an unrelated breakage
        # (e.g. the GUI class failing to import) does not needlessly push us
        # onto the slower subprocess path.
        from visual_spider import main as vs_main

        print(f"开始爬取: {keyword} - {city}, 页数: {pages}")

        # NOTE(review): this assumes the imported ``main`` object exposes a
        # ``run_crawl`` callable; if ``main`` is a plain function this raises
        # AttributeError and we drop into the subprocess fallback below —
        # confirm against visual_spider's actual API.
        return vs_main.run_crawl(keyword, city, pages, user_id)

    except Exception as e:
        print(f"爬虫执行错误: {str(e)}")
        # Fallback: run the crawler as a separate headless process.
        return run_spider_via_subprocess(keyword, city, pages, user_id, task_id)


def run_spider_via_subprocess(keyword, city, pages, user_id, task_id):
    """Run the crawler as a child process and parse its stdout for a count.

    :param keyword: search keyword, forwarded as ``--keyword``.
    :param city: target city, forwarded as ``--city``.
    :param pages: page count, forwarded as ``--pages``.
    :param user_id: owner id, forwarded as ``--user_id``.
    :param task_id: task id, forwarded as ``--task_id``.
    :return: the first number found on a completion/progress log line;
        10 as a rough estimate when the child succeeded but printed no
        parseable count; 0 on any failure (nonzero exit, timeout, ...).
    """
    import re
    import subprocess
    try:
        # Resolve the script next to THIS file so the call works regardless
        # of the current working directory (a bare relative path would break
        # when the Flask app is launched from elsewhere).
        script = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              "visual_spider.py")
        cmd = [
            sys.executable,   # reuse the parent's interpreter
            script,
            "--keyword", keyword,
            "--city", city,
            "--pages", str(pages),
            "--user_id", str(user_id),
            "--task_id", str(task_id),
            "--headless",     # headless mode: no GUI window
        ]

        # 1-hour cap so a hung crawler cannot block the task forever;
        # TimeoutExpired is swallowed by the generic handler below.
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=3600)

        if result.returncode != 0:
            print(f"子进程错误: {result.stderr}")
            return 0

        # Extract the first integer from a completion/progress log line.
        for line in result.stdout.split('\n'):
            if "爬取完成" in line or "获取" in line:
                numbers = re.findall(r'\d+', line)
                if numbers:
                    return int(numbers[0])
        return 10  # child succeeded but printed no count; rough estimate

    except Exception as e:
        print(f"子进程执行错误: {str(e)}")
        return 0


# 在Flask应用中修改爬取任务函数
def start_crawl_task_in_flask(keyword, city, pages, user_id, task_id):
    """Run a crawl task and keep its DB row in sync with its lifecycle.

    Marks the task ``running``, executes the crawler, then marks it
    ``completed`` with the result count. On any error the task is marked
    ``failed`` (best effort) and the original exception is re-raised.

    :param keyword: search keyword to crawl.
    :param city: target city name.
    :param pages: number of result pages to fetch.
    :param user_id: owner of the crawled records.
    :param task_id: id of the row in ``crawl_tasks`` to update.
    :return: number of crawled records.
    :raises Exception: whatever the crawler or the DB layer raised.
    """
    # Pre-bind so the except/finally paths never hit a NameError when
    # get_db_connection() itself is what failed.
    conn = None
    cursor = None
    try:
        conn = get_db_connection()
        cursor = conn.cursor()
        cursor.execute(
            "UPDATE crawl_tasks SET status = 'running', started_at = NOW() WHERE id = %s",
            (task_id,)
        )
        conn.commit()

        result_count = run_visual_spider(keyword, city, pages, user_id, task_id)

        cursor.execute(
            "UPDATE crawl_tasks SET status = 'completed', result_count = %s, completed_at = NOW() WHERE id = %s",
            (result_count, task_id)
        )
        conn.commit()

        return result_count

    except Exception:
        # Best-effort failure marker: the connection itself may be what
        # broke, so secondary DB errors are deliberately swallowed.
        if cursor is not None:
            try:
                cursor.execute(
                    "UPDATE crawl_tasks SET status = 'failed' WHERE id = %s",
                    (task_id,)
                )
                conn.commit()
            except Exception:
                pass
        raise  # preserve the original exception and traceback
    finally:
        # Always release DB resources — the original leaked the cursor and
        # connection whenever the crawler raised.
        if cursor is not None:
            try:
                cursor.close()
            except Exception:
                pass
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass