import webbrowser
from pathlib import Path

from flask import Flask, jsonify, render_template, request

from flask_cors import CORS

from datetime import datetime, timedelta
import subprocess
import os
import json
import sys
from threading import Thread

# Flask application; templates come from the default ``templates`` directory,
# static assets (including spider JSON output) from ``static``.
app = Flask(__name__, static_folder='static')

# Enable CORS so a separately-served frontend can call the JSON API.
CORS(app)


@app.route('/')
def index():
    """Serve the landing page from templates/index.html."""
    page = render_template('index.html')
    return page


# Location of the Scrapy project (sibling ``bilibili`` directory) and
# per-spider bookkeeping: ``json_path`` is the output file name, ``process``
# is a slot for the crawl's subprocess handle.
SCRAPY_PROJECT_PATH = Path(__file__).parent.parent / 'bilibili'
SPIDER_DATA = {
    'dance': {'json_path': 'dance.json', 'process': None},
    'game': {'json_path': 'game.json', 'process': None},
    'knowledge': {'json_path': 'knowledge.json', 'process': None},
    'movie': {'json_path': 'movie.json', 'process': None},
    'shortplay': {'json_path': 'shortplay.json', 'process': None},
}


def run_spider(spider_name):
    """Launch the named Scrapy spider as a background subprocess.

    Output is written to ``static/<spider_name>.json``; any stale output
    file is deleted first so ``/api/status`` cannot mistake an old, complete
    file for a fresh run.

    Args:
        spider_name: name of a spider registered in ``SPIDER_DATA``.

    Returns:
        tuple: ``(success: bool, message: str)``.
    """
    try:
        # Spider output goes into this Flask app's static directory so the
        # frontend (and the status endpoint) can find it.
        output_dir = Path(__file__).parent.parent / 'data_analysis_flask' / 'static'
        output_path = output_dir / f'{spider_name}.json'

        # Remove stale output from a previous crawl.
        if output_path.exists():
            output_path.unlink()

        # Use the interpreter running this server so the subprocess sees the
        # same environment / site-packages.
        python_path = sys.executable
        # The Scrapy project's parent dir must be importable so that
        # ``bilibili.settings`` resolves.
        project_parent = str(SCRAPY_PROJECT_PATH.parent)

        # Run scrapy via ``python -m scrapy`` rather than relying on a
        # ``scrapy`` executable being on PATH.
        scrapy_cmd = [
            python_path,
            "-m",
            "scrapy",
            "crawl",
            spider_name,
            "-o",
            str(output_path),
        ]

        # Make sure the child process can locate bilibili.settings.
        env = os.environ.copy()
        env["PYTHONPATH"] = project_parent
        env["SCRAPY_SETTINGS_MODULE"] = "bilibili.settings"

        # Fire-and-forget launch; keep the handle in SPIDER_DATA so callers
        # can later inspect or terminate the crawl (the slot existed but was
        # never populated before).
        process = subprocess.Popen(
            scrapy_cmd,
            cwd=str(SCRAPY_PROJECT_PATH),  # work inside the Scrapy project dir
            env=env,
        )
        if spider_name in SPIDER_DATA:
            SPIDER_DATA[spider_name]['process'] = process
        return True, "爬虫启动成功"
    except Exception as e:
        # Best-effort boundary: report failure instead of raising into Flask.
        return False, f"启动失败: {str(e)}"



@app.route('/api/start/<spider_name>', methods=['POST'])
def start_spider(spider_name):
    """Start one of the registered spiders.

    Returns 400 for an unknown spider name, 500 when the launch fails, and
    200 with the launch result otherwise.
    """
    app.logger.info(f"Received request to start spider: {spider_name}")
    try:
        # Validate against the registry instead of a duplicated literal list.
        if spider_name not in SPIDER_DATA:
            return jsonify({"message": "Invalid spider name"}), 400

        success, message = run_spider(spider_name)
        if not success:
            # run_spider swallows its own exceptions and reports via the
            # flag; previously this failure was still answered with 200.
            app.logger.error(f"Failed to start spider {spider_name}: {message}")
            return jsonify({"message": "Failed to start spider",
                            "result": [success, message]}), 500

        return jsonify({"message": "Spider started successfully",
                        "result": [success, message]}), 200

    except Exception as e:
        # Defensive boundary: never leak a traceback to the client.
        app.logger.error(f"Failed to start spider {spider_name}: {str(e)}")
        return jsonify({"message": "Failed to start spider"}), 500


def get_spider_data_impl(spider_name):
    """Read the scraped JSON for *spider_name*.

    Returns the parsed data, or None when the file is missing or unreadable.

    NOTE(review): the original read from the ``templates`` directory, but the
    spiders write their output to ``static`` (see run_spider and
    check_spider_status), so data was never found — aligned to ``static``.
    """
    try:
        json_path = Path(__file__).parent.parent / 'data_analysis_flask' / 'static' / f'{spider_name}.json'

        # Missing file simply means no data yet.
        if not json_path.exists():
            return None

        with open(json_path, 'r', encoding='utf-8') as f:
            data = json.load(f)

        return data
    except Exception as e:
        # Best-effort: log and report "no data" rather than crash the route.
        print(f"Error reading data for {spider_name}: {str(e)}")
        return None


@app.route('/api/get/<spider_name>', methods=['GET'])
def get_spider_data(spider_name):
    """Return the scraped JSON for *spider_name*, or a 404 error payload."""
    payload = get_spider_data_impl(spider_name)
    if payload is None:
        return jsonify({'status': 'error', 'message': 'No data available or error reading data'}), 404
    return jsonify({'status': 'success', 'data': payload})


@app.route('/<spider_name>')
def data_view(spider_name):
    """Render the visualization page for a known spider.

    Validates *spider_name* against SPIDER_DATA so an arbitrary URL segment
    cannot be used to probe for other templates; unknown names get 404
    instead of an unhandled TemplateNotFound error.
    """
    if spider_name not in SPIDER_DATA:
        return jsonify({"message": "Invalid spider name"}), 404
    return render_template(f'visualization_{spider_name}.html')

@app.route('/api/status', methods=['GET'])
def check_spider_status():
    """Report, per spider, whether its crawl still appears to be running.

    A crawl is considered *finished* when its JSON output file ends with the
    closing ``]`` (written last when the item list is complete).  The
    response maps spider name -> True while output is missing/incomplete
    (crawl presumably still running) and False once the file looks complete.
    """
    status = {}
    output_dir = Path(__file__).parent.parent / 'data_analysis_flask' / 'static'

    for name in SPIDER_DATA:
        output_path = output_dir / f"{name}.json"

        # st_size > 2 skips an empty "[]" placeholder.
        if output_path.exists() and output_path.stat().st_size > 2:
            try:
                size = output_path.stat().st_size
                with open(output_path, 'rb') as f:
                    # Inspect at most the last 10 bytes.  Clamp the offset:
                    # the original seek(-10, SEEK_END) raised OSError for
                    # files of 3-9 bytes, misreporting them as incomplete.
                    f.seek(max(0, size - 10))
                    tail = f.read().decode(errors='ignore')
                    if ']' in tail:
                        status[name] = False  # output file is complete
                        continue
            except Exception as e:
                print(f"[检查 {name}] 读取末尾失败：{e}")

        status[name] = True  # missing or still being written
    return jsonify(status)


if __name__ == '__main__':
    # Local development server address (Flask's default host/port).
    url = "http://127.0.0.1:5000/"
    # Open the home page in the default browser; app.run() starts serving
    # immediately after, so the first request normally succeeds.
    webbrowser.open_new(url)
    app.run()

