from flask import Flask, render_template, request, jsonify
from flask_socketio import SocketIO, emit
import threading
import time
import json
import os
import sys
import traceback
from datetime import datetime
import logging
from logging.handlers import RotatingFileHandler
import queue
import io  # 添加 io 模块导入

# Work around Windows console encoding problems: re-wrap stdout/stderr in
# UTF-8 text wrappers so Chinese log messages don't raise UnicodeEncodeError
# (undecodable characters are replaced rather than crashing).
if sys.platform == 'win32':
    if hasattr(sys.stdout, 'buffer'):
        sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', errors='replace')
    if hasattr(sys.stderr, 'buffer'):
        sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8', errors='replace')

# Flask application plus Socket.IO in threading mode, CORS open to all origins.
app = Flask(__name__)
app.config['SECRET_KEY'] = 'your-secret-key-here'  # NOTE(review): placeholder secret — load from env/config before deployment
socketio = SocketIO(app, cors_allowed_origins="*", async_mode='threading')

# 配置日志
def setup_logging():
    """Configure application-wide logging.

    Installs a console handler (stdout) and a UTF-8 rotating file handler
    (crawler_manager.log, 10 MB x 5 backups) on the root logger, and quiets
    werkzeug request logging down to WARNING.

    Side effects only; returns None.
    """
    log_formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')

    # Console output.
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(log_formatter)

    # Rotating file log — UTF-8 so Chinese messages are stored correctly.
    file_handler = RotatingFileHandler('crawler_manager.log', maxBytes=10 * 1024 * 1024, backupCount=5, encoding='utf-8')
    file_handler.setFormatter(log_formatter)

    # Root logger gets both handlers.
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    root_logger.addHandler(console_handler)
    root_logger.addHandler(file_handler)

    # Quiet werkzeug's request spam.  It keeps its own file handler, but
    # propagation must be disabled: the root logger also owns file_handler,
    # so a propagating record would be written to the file twice.
    werkzeug_logger = logging.getLogger('werkzeug')
    werkzeug_logger.setLevel(logging.WARNING)
    werkzeug_logger.addHandler(file_handler)
    werkzeug_logger.propagate = False

# Configure logging once at import time, then grab this module's logger.
setup_logging()
logger = logging.getLogger(__name__)

class WebSocketLogHandler(logging.Handler):
    """Logging handler that forwards records to WebSocket clients.

    Records are buffered and sent in time-ordered batches (at most one batch
    per 0.5 s) to avoid emitting one socket event per log line.  Use it as a
    context manager (or rely on close()) to guarantee the tail of the buffer
    is delivered.
    """

    # Map stdlib numeric levels to the string tags the frontend expects.
    # Hoisted to a class attribute so it is not rebuilt for every record.
    _LEVEL_MAP = {
        logging.DEBUG: 'debug',
        logging.INFO: 'info',
        logging.WARNING: 'warning',
        logging.ERROR: 'error',
        logging.CRITICAL: 'critical',
    }

    def __init__(self, crawler_type):
        super().__init__()
        self.crawler_type = crawler_type
        self.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s'))
        self.buffer = []  # pending LogRecords awaiting a batch send
        self.last_flush_time = time.time()

    def emit(self, record):
        """Buffer a record; flush the batch once 0.5 s have elapsed."""
        self.buffer.append(record)
        if time.time() - self.last_flush_time > 0.5:
            self.flush_buffer()

    def flush_buffer(self):
        """Send all buffered records to the '/crawler' namespace clients."""
        if not self.buffer:
            return

        try:
            # Sort by creation time so records arrive in chronological order.
            records = sorted(self.buffer, key=lambda r: r.created)
            self.buffer = []

            for record in records:
                log_entry = {
                    'crawler_type': self.crawler_type,
                    'timestamp': datetime.fromtimestamp(record.created).strftime("%H:%M:%S"),
                    'message': record.getMessage(),
                    'type': self._LEVEL_MAP.get(record.levelno, 'info')
                }
                socketio.emit('log_message', log_entry, namespace='/crawler')

        except Exception as e:
            logger.error(f"日志发送异常: {str(e)}")
        finally:
            self.last_flush_time = time.time()

    def close(self):
        """Flush trailing records before the handler is discarded.

        Without this, records buffered after the last timed flush would be
        lost when logging shutdown closes the handler.
        """
        self.flush_buffer()
        super().close()

    def __enter__(self):
        """Context manager entry — returns the handler itself."""
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Context manager exit — make sure every buffered record is sent."""
        self.flush_buffer()

# Import the real crawler implementations; fall back to no-op stubs so the
# web UI still starts (and reports the failure) when a module is missing.
try:
    from semantic_scholar_crawler import SemanticScholarCrawler
    from scholar_crawler import ScholarCrawler
    from dblp_url_crawler import DBLPCrawler
    from test_crawler import TestCrawler
    logger.info("所有爬虫模块导入成功")
except ImportError as e:
    logger.error(f"导入爬虫模块失败: {str(e)}")
    # Stub exposing the interface start_crawler_task expects.
    class BaseCrawler:
        def __init__(self, status_callback=None, logger=None):
            self.status_callback = status_callback
            self.logger = logger or logging.getLogger(__name__)
            self.is_running = False
            self.processed_count = 0
            self.total_count = 0
            self.current_author = ""
            self.start_time = None
            self.stop_requested = False
        
        def stop(self):
            """Request a cooperative stop and mark the crawler as not running."""
            self.stop_requested = True
            self.is_running = False
            if self.logger:
                self.logger.info("爬虫停止请求已接收")
        
        def _get_mongodb_connection(self):
            # Stub: no database connection available.
            return None
        
        def process_mongodb_documents(self, start_index=1, stop_check=None):
            # Stub: real crawlers iterate MongoDB documents here.
            pass
        
        def update_author_urls(self, input_file=None, output_file=None, db_name="LZQ", collection_name="LIST"):
            # Stub: the real DBLP crawler updates author URLs here.
            # NOTE(review): start_crawler_task calls this with a stop_check
            # kwarg this stub does not accept — verify signatures match.
            pass
    
    SemanticScholarCrawler = BaseCrawler
    ScholarCrawler = BaseCrawler
    DBLPCrawler = BaseCrawler
    TestCrawler = BaseCrawler

# Multi-crawler bookkeeping, all keyed by crawler type.
crawler_threads = {}  # crawler_type -> running worker thread
crawler_loggers = {}  # crawler_type -> dedicated logger

crawler_types = ['semantic_scholar', 'scholar', 'dblp', 'test']

# One independent status dict per crawler type.
crawler_status = {
    ct: {
        'is_running': False,
        'start_time': None,
        'processed_count': 0,
        'total_count': 0,
        'success_count': 0,
        'current_author': '',
        'error_message': ''
    }
    for ct in crawler_types
}

for crawler_type in crawler_types:
    # Dedicated logger per crawler; propagation is disabled so its output
    # stays off the root logger's handlers.
    dedicated_logger = logging.getLogger(f"crawler.{crawler_type}")
    dedicated_logger.setLevel(logging.INFO)
    dedicated_logger.propagate = False
    crawler_loggers[crawler_type] = dedicated_logger

# Background worker: runs one crawler to completion and streams status/log
# events to WebSocket clients.
def start_crawler_task(crawler_type, start_index=1):
    """Run one crawler task in a background thread.

    Args:
        crawler_type: one of 'semantic_scholar', 'scholar', 'dblp', 'test'.
        start_index: 1-based index to resume MongoDB processing from
            (ignored by 'dblp', which reads a JSON input file instead).

    Side effects: resets and mutates crawler_status[crawler_type], attaches a
    WebSocketLogHandler to the crawler's dedicated logger, exposes the crawler
    instance on the worker thread (for /api/stop_crawler), and emits
    'crawler_status' / 'log_message' events on the '/crawler' namespace.
    """
    global crawler_threads, crawler_status
    
    try:
        # Reset this crawler's shared status for a fresh run.
        crawler_status[crawler_type]['is_running'] = True
        crawler_status[crawler_type]['start_time'] = datetime.now()
        crawler_status[crawler_type]['processed_count'] = 0
        crawler_status[crawler_type]['total_count'] = 0
        crawler_status[crawler_type]['success_count'] = 0
        crawler_status[crawler_type]['current_author'] = ''
        crawler_status[crawler_type]['error_message'] = ''
        
        # Dedicated per-crawler logger created at import time.
        crawler_logger = crawler_loggers[crawler_type]
        
        # Remove stale WebSocket handlers from a previous run so log lines
        # are not emitted to clients more than once.
        for handler in crawler_logger.handlers[:]:
            if isinstance(handler, WebSocketLogHandler):
                crawler_logger.removeHandler(handler)
        
        # Context manager guarantees buffered log records are flushed on exit.
        with WebSocketLogHandler(crawler_type) as ws_handler:
            ws_handler.setLevel(logging.INFO)
            crawler_logger.addHandler(ws_handler)
            
            # Announce the start to all '/crawler' clients.
            socketio.emit('crawler_status', {
                'crawler_type': crawler_type,
                'status': 'started', 
                'message': f'{crawler_type}爬虫任务已开始',
                'start_time': crawler_status[crawler_type]['start_time'].isoformat(),
                'processed_count': 0,
                'total_count': 0,
                'success_count': 0,
                'current_author': '',
                'success_rate': 0
            }, namespace='/crawler')
            
            # Startup log lines (also streamed to clients via ws_handler).
            crawler_logger.info(f'开始执行爬虫任务: {crawler_type}')
            crawler_logger.info(f'起始索引: {start_index}')
            crawler_logger.info(f'开始时间: {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}')
            
            def status_callback(status_data):
                """Merge a crawler-pushed status dict into shared state and broadcast it."""
                crawler_status[crawler_type].update(status_data)
                # Backfill fields the frontend expects if the crawler omitted them.
                if 'success_count' not in status_data:
                    status_data['success_count'] = crawler_status[crawler_type].get('success_count', 0)
                if 'success_rate' not in status_data and status_data.get('processed_count', 0) > 0:
                    status_data['success_rate'] = round((status_data['success_count'] / status_data['processed_count']) * 100, 2)
                socketio.emit('crawler_status', status_data, namespace='/crawler')
            
            # Instantiate the crawler matching the requested type.
            crawler = None
            if crawler_type == 'semantic_scholar':
                crawler_logger.info("创建 Semantic Scholar 爬虫实例...")
                crawler = SemanticScholarCrawler(status_callback=status_callback, logger=crawler_logger)
            elif crawler_type == 'scholar':
                crawler_logger.info("创建 Scholar 爬虫实例...")
                crawler = ScholarCrawler(status_callback=status_callback, logger=crawler_logger)
            elif crawler_type == 'dblp':
                crawler_logger.info("创建 DBLP 爬虫实例...")
                crawler = DBLPCrawler(status_callback=status_callback, logger=crawler_logger)
            elif crawler_type == 'test':
                crawler_logger.info("创建测试爬虫实例...")
                crawler = TestCrawler(status_callback=status_callback, logger=crawler_logger)
            else:
                raise ValueError(f"未知的爬虫类型: {crawler_type}")
            
            # Expose the crawler on the thread object so /api/stop_crawler can
            # reach it and call stop().  (threading is already imported at
            # module level; this local import is redundant but harmless.)
            import threading
            current_thread = threading.current_thread()
            current_thread.crawler = crawler
            
            # MongoDB-backed crawlers: verify connectivity before starting.
            if crawler_type in ['semantic_scholar', 'scholar', 'test']:
                crawler_logger.info("检查MongoDB连接...")
                try:
                    client = crawler._get_mongodb_connection()
                    if client:
                        crawler_logger.info("MongoDB连接测试成功")
                    else:
                        crawler_logger.error("MongoDB连接测试失败")
                        crawler_logger.error("请检查网络连接和数据库配置")
                        return
                except Exception as e:
                    crawler_logger.error(f"MongoDB连接异常: {str(e)}")
                    return
            elif crawler_type == 'dblp':
                crawler_logger.info("DBLP爬虫不需要MongoDB连接")
            
            # Run the actual crawl.
            if crawler_type == 'dblp':
                crawler_logger.info(f"处理DBLP输入文件...")
                input_file = 'Results_LZQ.LIST_no_arxiv.json'
                if os.path.exists(input_file):
                    crawler_logger.info(f"找到输入文件: {input_file}")
                    # stop_check polls the shared flag cleared by /api/stop_crawler.
                    # NOTE(review): the fallback BaseCrawler.update_author_urls
                    # does not accept stop_check — verify the real DBLP crawler does.
                    crawler.update_author_urls(
                        input_file=input_file,
                        stop_check=lambda: not crawler_status[crawler_type]['is_running']
                    )
                else:
                    crawler_logger.error(f"输入文件不存在: {input_file}")
                    return
            else:
                crawler_logger.info(f"开始处理MongoDB文档，起始索引: {start_index}")
                
                # Fetch total document count so the UI can show progress.
                try:
                    client = crawler._get_mongodb_connection()
                    if client:
                        db = client['LZQ']
                        collection = db['LIST']
                        total_count = collection.count_documents({})
                        crawler_status[crawler_type]['total_count'] = total_count
                        
                        # Broadcast the total so progress bars can be sized.
                        socketio.emit('crawler_status', {
                            'crawler_type': crawler_type,
                            'total_count': total_count
                        }, namespace='/crawler')
                except Exception as e:
                    crawler_logger.error(f"获取文档总数失败: {str(e)}")
                
                # stop_check polls the shared flag cleared by /api/stop_crawler.
                crawler.process_mongodb_documents(
                    start_index=start_index, 
                    stop_check=lambda: not crawler_status[crawler_type]['is_running']
                )
            
            # is_running still True here means the task ran to completion;
            # False means /api/stop_crawler cleared the flag mid-run.
            if crawler_status[crawler_type]['is_running']:
                crawler_logger.info(f"{crawler_type} 爬虫任务执行完成")
                status = crawler_status[crawler_type]
                success_rate = 0
                if status['processed_count'] > 0:
                    success_rate = round((status['success_count'] / status['processed_count']) * 100, 2)
                
                socketio.emit('crawler_status', {
                    'crawler_type': crawler_type,
                    'status': 'completed', 
                    'message': f'{crawler_type}爬虫任务已完成',
                    'processed_count': status['processed_count'],
                    'total_count': status['total_count'],
                    'success_count': status['success_count'],
                    'success_rate': success_rate,
                    'current_author': status['current_author']
                }, namespace='/crawler')
            else:
                crawler_logger.info(f"{crawler_type} 爬虫任务被用户停止")
                status = crawler_status[crawler_type]
                success_rate = 0
                if status['processed_count'] > 0:
                    success_rate = round((status['success_count'] / status['processed_count']) * 100, 2)
                
                socketio.emit('crawler_status', {
                    'crawler_type': crawler_type,
                    'status': 'stopped', 
                    'message': f'{crawler_type}爬虫任务已停止',
                    'processed_count': status['processed_count'],
                    'total_count': status['total_count'],
                    'success_count': status['success_count'],
                    'success_rate': success_rate,
                    'current_author': status['current_author']
                }, namespace='/crawler')
            
            # Final log line before the handler is flushed and removed.
            crawler_logger.info(f"{crawler_type} 爬虫任务结束")
        
    except Exception as e:
        error_msg = f"{crawler_type}爬虫任务出错: {str(e)}"
        
        # Log the failure (with traceback) through the crawler's logger.
        crawler_logger = crawler_loggers[crawler_type]
        crawler_logger.error(error_msg)
        crawler_logger.error(traceback.format_exc())
        
        # Record the error for /api/status consumers.
        crawler_status[crawler_type]['error_message'] = error_msg
        
        # Broadcast the error state with the last known progress numbers.
        status = crawler_status[crawler_type]
        success_rate = 0
        if status['processed_count'] > 0:
            success_rate = round((status['success_count'] / status['processed_count']) * 100, 2)
        
        socketio.emit('crawler_status', {
            'crawler_type': crawler_type,
            'status': 'error', 
            'message': error_msg,
            'processed_count': status['processed_count'],
            'total_count': status['total_count'],
            'success_count': status['success_count'],
            'success_rate': success_rate,
            'current_author': status['current_author']
        }, namespace='/crawler')
        
        # Also dump the traceback to stderr for operators.
        traceback.print_exc()
    finally:
        # Always mark the crawler idle and drop the (now-finished) thread ref.
        crawler_status[crawler_type]['is_running'] = False
        if crawler_type in crawler_threads:
            del crawler_threads[crawler_type]

# Helper invoked by crawlers to publish incremental progress.
def update_crawler_progress(crawler_type, processed_count, success_count, current_author):
    """Record one crawler's progress and broadcast it to '/crawler' clients.

    Updates the shared crawler_status entry, derives the success rate, and
    emits a 'crawler_status' event.
    """
    global crawler_status

    state = crawler_status[crawler_type]
    state['processed_count'] = processed_count
    state['success_count'] = success_count
    state['current_author'] = current_author

    # Success rate as a percentage, guarded against division by zero.
    rate = round((success_count / processed_count) * 100, 2) if processed_count > 0 else 0

    payload = {
        'crawler_type': crawler_type,
        'processed_count': processed_count,
        'success_count': success_count,
        'success_rate': rate,
        'current_author': current_author
    }
    socketio.emit('crawler_status', payload, namespace='/crawler')

@app.route('/')
def index():
    """Render the navigation/landing page."""
    return render_template('index.html')

@app.route('/semantic_scholar')
def semantic_scholar():
    """Render the Semantic Scholar crawler page."""
    return render_template('semantic_scholar.html')

@app.route('/scholar')
def scholar():
    """Render the Google Scholar crawler page."""
    return render_template('scholar.html')

@app.route('/dblp')
def dblp():
    """Render the DBLP crawler page."""
    return render_template('dblp.html')

@app.route('/test')
def test():
    """Render the test-crawler page."""
    return render_template('test.html')

@app.route('/logs_monitor')
def logs_monitor():
    """Render the unified log-monitoring page."""
    return render_template('logs_monitor.html')

@app.route('/api/start_crawler', methods=['POST'])
def start_crawler():
    """Start a crawler task in a background daemon thread.

    Expects a JSON body like {"crawler_type": str, "start_index": int}; both
    keys are optional and default to 'semantic_scholar' / 1.
    Returns JSON {'success': bool, 'message': str}.
    """
    global crawler_threads, crawler_status
    
    try:
        # Bug fix: request.get_json() can return None (empty body), and the
        # original called data.get(...) before its None check, raising
        # AttributeError.  Fall back to {} so both .get() calls are safe.
        data = request.get_json() or {}
        crawler_type = data.get('crawler_type', 'semantic_scholar')
        start_index = data.get('start_index', 1)
        
        logger.info(f"收到启动爬虫请求: {crawler_type}")
        
        # Reject if this crawler is already running.
        if crawler_type in crawler_status and crawler_status[crawler_type]['is_running']:
            logger.warning(f"爬虫 {crawler_type} 已在运行中，拒绝新请求")
            return jsonify({'success': False, 'message': f'{crawler_type}爬虫任务已在运行中'})
        
        # Validate the crawler type.
        valid_types = ['semantic_scholar', 'scholar', 'dblp', 'test']
        if crawler_type not in valid_types:
            logger.error(f"无效的爬虫类型: {crawler_type}")
            return jsonify({'success': False, 'message': f'无效的爬虫类型: {crawler_type}'})
        
        logger.info(f"爬虫类型验证通过，开始启动后台线程")
        
        # Launch the worker as a daemon so it never blocks interpreter exit.
        crawler_thread = threading.Thread(
            target=start_crawler_task,
            args=(crawler_type, start_index),
            daemon=True
        )
        crawler_threads[crawler_type] = crawler_thread
        crawler_thread.start()
        
        logger.info(f"后台线程已启动")
        return jsonify({'success': True, 'message': f'{crawler_type}爬虫任务已启动'})
    except Exception as e:
        logger.error(f"启动爬虫时出错: {str(e)}")
        return jsonify({'success': False, 'message': f'启动失败: {str(e)}'})

@app.route('/api/stop_crawler', methods=['POST'])
def stop_crawler():
    """Stop one crawler (JSON body {"crawler_type": "..."}) or all ("all").

    Clears the shared is_running flag (polled by each crawler's stop_check
    lambda), calls the crawler instance's stop() when it is reachable through
    its worker thread, and broadcasts a final 'stopped' status snapshot.
    Returns JSON {'success': bool, 'message': str}.
    """
    global crawler_status
    
    try:
        data = request.get_json()
        crawler_type = data.get('crawler_type', 'all')
        
        if crawler_type == 'all':
            # Stop every crawler that is currently running.
            stopped_count = 0
            for ct in crawler_status:
                if crawler_status[ct]['is_running']:
                    crawler_status[ct]['is_running'] = False
                    stopped_count += 1
                    
                    # Also invoke the crawler instance's stop() when available.
                    if ct in crawler_threads:
                        try:
                            thread = crawler_threads[ct]
                            if hasattr(thread, 'crawler'):
                                thread.crawler.stop()
                                logger.info(f"已调用 {ct} 爬虫的stop方法")
                        except Exception as e:
                            logger.warning(f"调用 {ct} 爬虫stop方法失败: {str(e)}")
                    
                    # Broadcast the final status snapshot for this crawler.
                    status = crawler_status[ct]
                    success_rate = 0
                    if status['processed_count'] > 0:
                        success_rate = round((status['success_count'] / status['processed_count']) * 100, 2)
                    
                    socketio.emit('crawler_status', {
                        'crawler_type': ct,
                        'status': 'stopped', 
                        'message': f'{ct}爬虫任务已停止',
                        'processed_count': status['processed_count'],
                        'total_count': status['total_count'],
                        'success_count': status['success_count'],
                        'success_rate': success_rate,
                        'current_author': status['current_author']
                    }, namespace='/crawler')
            
            if stopped_count > 0:
                return jsonify({'success': True, 'message': f'已停止 {stopped_count} 个爬虫任务'})
            else:
                return jsonify({'success': False, 'message': '没有正在运行的爬虫任务'})
        else:
            # Stop a single named crawler.
            if crawler_type not in crawler_status or not crawler_status[crawler_type]['is_running']:
                return jsonify({'success': False, 'message': f'{crawler_type}爬虫任务未在运行'})
            
            # Cooperative stop flag, polled by the crawler's stop_check.
            crawler_status[crawler_type]['is_running'] = False
            # NOTE(review): this 'status' key is not initialised elsewhere and
            # is not set by the 'all' branch — confirm whether anything reads it.
            crawler_status[crawler_type]['status'] = 'stopped'
            
            # Also invoke the crawler instance's stop() when available.
            if crawler_type in crawler_threads:
                try:
                    thread = crawler_threads[crawler_type]
                    # start_crawler_task attaches the crawler to its thread.
                    if hasattr(thread, 'crawler'):
                        thread.crawler.stop()
                        logger.info(f"已调用 {crawler_type} 爬虫的stop方法")
                    else:
                        logger.warning(f"线程对象没有crawler属性，无法调用stop方法")
                except Exception as e:
                    logger.warning(f"调用爬虫stop方法失败: {str(e)}")
            
            # Broadcast the final status snapshot.
            status = crawler_status[crawler_type]
            success_rate = 0
            if status['processed_count'] > 0:
                success_rate = round((status['success_count'] / status['processed_count']) * 100, 2)
            
            socketio.emit('crawler_status', {
                'crawler_type': crawler_type,
                'status': 'stopped', 
                'message': f'{crawler_type}爬虫任务已停止',
                'processed_count': status['processed_count'],
                'total_count': status['total_count'],
                'success_count': status['success_count'],
                'success_rate': success_rate,
                'current_author': status['current_author']
            }, namespace='/crawler')
            
            return jsonify({'success': True, 'message': f'{crawler_type}爬虫任务已停止'})
            
    except Exception as e:
        logger.error(f"停止爬虫失败: {str(e)}")
        return jsonify({'success': False, 'message': f'停止失败: {str(e)}'})

@app.route('/api/status')
def get_status():
    """Return every crawler's status dict plus a count of running crawlers."""
    running = [s for s in crawler_status.values() if s['is_running']]
    return jsonify({
        'crawlers': crawler_status,
        'running_count': len(running)
    })

@app.route('/api/get_results')
def get_results():
    """Return the Semantic Scholar results file as JSON.

    Responds with {'success': True, 'results', 'count'} on success or
    {'success': False, 'message'} when the file is missing or unreadable.
    """
    file_path = 'Results_Semantic_Scholar.json'
    try:
        if not os.path.exists(file_path):
            return jsonify({
                'success': False,
                'message': '结果文件不存在'
            })

        with open(file_path, 'r', encoding='utf-8') as fh:
            payload = json.load(fh)

        return jsonify({
            'success': True,
            'results': payload,
            'count': len(payload)
        })
    except Exception as exc:
        return jsonify({
            'success': False,
            'message': f'读取结果文件失败: {str(exc)}'
        })

@app.route('/api/results/<crawler_type>')
def get_specific_results(crawler_type):
    """Return the result file for one crawler as its full JSON structure.

    Responds with {'success': True, 'results', 'count', 'filename',
    'crawler_type', 'timestamp'} on success, or {'success': False, 'message'}
    for an unknown type, a missing file, or malformed JSON.
    """
    try:
        # Reject unknown crawler types early.
        valid_types = ['semantic_scholar', 'scholar', 'dblp', 'test']
        if crawler_type not in valid_types:
            return jsonify({
                'success': False,
                'message': f'无效的爬虫类型: {crawler_type}'
            })
        
        # Map each crawler type to its result file on disk.
        result_files = {
            'semantic_scholar': 'Results_Semantic_Scholar.json',
            'scholar': 'Results_LZQ.LIST_no_arxiv.json',
            'dblp': 'Results_LZQ.LIST_dblp_only.json',
            'test': 'test_results.json'
        }
        
        filename = result_files.get(crawler_type)
        
        if not filename or not os.path.exists(filename):
            # Bug fix: the error message previously contained a literal
            # placeholder instead of interpolating the missing file name.
            return jsonify({
                'success': False,
                'message': f'结果文件不存在: {filename}'
            })
        
        # Load the complete JSON document (list or dict).
        with open(filename, 'r', encoding='utf-8') as f:
            raw_data = json.load(f)
        
        return jsonify({
            'success': True,
            'results': raw_data,
            'count': len(raw_data),
            'filename': filename,
            'crawler_type': crawler_type,
            'timestamp': datetime.now().isoformat()
        })
        
    except json.JSONDecodeError as e:
        logger.error(f"JSON文件格式错误: {str(e)}")
        return jsonify({
            'success': False,
            'message': f'JSON文件格式错误: {str(e)}'
        })
    except Exception as e:
        logger.error(f"获取结果数据失败: {str(e)}")
        traceback.print_exc()
        return jsonify({
            'success': False,
            'message': f'获取结果数据失败: {str(e)}'
        })
# WebSocket connection and event handling
@socketio.on('connect', namespace='/crawler')
def handle_connect():
    """Handle a client connecting to the '/crawler' namespace.

    Requires a 'type' query parameter naming a known crawler; returning
    False rejects the connection otherwise.  On success, sends the
    connecting client a greeting log_message and the crawler's current
    status snapshot so the UI can render immediately.
    """
    try:
        # Crawler type arrives as a query parameter on the handshake request.
        crawler_type = request.args.get('type')
        if not crawler_type:
            logger.warning("客户端连接未指定爬虫类型")
            return False  # reject the connection
        
        if crawler_type not in crawler_types:
            logger.warning(f"无效的爬虫类型: {crawler_type}")
            return False
        
        logger.info(f'客户端连接到爬虫 {crawler_type}')
        
        # Greet the client using the same payload shape as WebSocketLogHandler.
        emit('log_message', {
            'crawler_type': crawler_type,
            'timestamp': datetime.now().strftime("%H:%M:%S"),
            'message': f'WebSocket连接已建立',
            'type': 'info'
        }, namespace='/crawler')
        
        # Push the crawler's current status snapshot.
        if crawler_type in crawler_status:
            status = crawler_status[crawler_type]
            success_rate = 0
            if status['processed_count'] > 0:
                success_rate = round((status['success_count'] / status['processed_count']) * 100, 2)
            
            emit('crawler_status', {
                'crawler_type': crawler_type,
                'status': 'running' if status['is_running'] else 'idle',
                'message': f'{crawler_type}爬虫当前状态',
                'start_time': status['start_time'].isoformat() if status['start_time'] else None,
                'processed_count': status['processed_count'],
                'total_count': status['total_count'],
                'success_count': status['success_count'],
                'success_rate': success_rate,
                'current_author': status['current_author']
            }, namespace='/crawler')
    except Exception as e:
        logger.error(f"处理WebSocket连接时出错: {str(e)}")

@socketio.on('disconnect', namespace='/crawler')
def handle_disconnect():
    """Client disconnect event — intentionally a no-op.

    Many clients connect and disconnect concurrently, so disconnects are
    deliberately not logged.
    """
    pass

# Periodic heartbeat so clients can detect that the backend is alive.
def heartbeat_monitor():
    """Emit a 'heartbeat' event on '/crawler' every 30 s, forever.

    Intended to run in a daemon thread; on any failure it logs the error
    and backs off to a 60 s wait before retrying.
    """
    while True:
        try:
            payload = {
                'time': datetime.now().isoformat(),
                'message': '系统心跳信号'
            }
            socketio.emit('heartbeat', payload, namespace='/crawler')
            time.sleep(30)
        except Exception as exc:
            logger.error(f"心跳检测出错: {str(exc)}")
            time.sleep(60)  # back off after a failure

# Start the heartbeat sender at import time as a daemon thread.
heartbeat_thread = threading.Thread(target=heartbeat_monitor, daemon=True)
heartbeat_thread.start()

# Watchdog: reports crawler worker threads that died without cleaning up.
def monitor_threads():
    """Poll crawler worker threads every 5 s and report unexpected deaths.

    Intended for a daemon thread.  Dead threads are pruned from
    crawler_threads; if the corresponding status still claims the crawler is
    running, it is marked stopped and an '意外终止' status is broadcast.
    """
    while True:
        # Snapshot the items: both this loop and the workers' own finally
        # blocks mutate crawler_threads concurrently.
        for crawler_type, thread in list(crawler_threads.items()):
            if not thread.is_alive():
                logger.warning(f"线程 {crawler_type} 已终止")
                # Bug fix: the task's finally-block may have already removed
                # this entry; pop() avoids the KeyError that `del` could raise
                # in that race (which would kill the monitor thread).
                crawler_threads.pop(crawler_type, None)
                
                # Status still says running -> the thread died unexpectedly.
                if crawler_status.get(crawler_type, {}).get('is_running', False):
                    crawler_status[crawler_type]['is_running'] = False
                    status = crawler_status[crawler_type]
                    success_rate = 0
                    if status['processed_count'] > 0:
                        success_rate = round((status['success_count'] / status['processed_count']) * 100, 2)
                    
                    socketio.emit('crawler_status', {
                        'crawler_type': crawler_type,
                        'status': 'stopped', 
                        'message': f'{crawler_type}爬虫任务意外终止',
                        'processed_count': status['processed_count'],
                        'total_count': status['total_count'],
                        'success_count': status['success_count'],
                        'success_rate': success_rate,
                        'current_author': status['current_author']
                    }, namespace='/crawler')
        
        time.sleep(5)  # poll interval

if __name__ == '__main__':
    # Ensure the directories Flask serves templates/static assets from exist.
    os.makedirs('templates', exist_ok=True)
    os.makedirs('static', exist_ok=True)
    os.makedirs('static/css', exist_ok=True)
    os.makedirs('static/js', exist_ok=True)
    
    # Watchdog thread that reports crawler threads dying unexpectedly.
    monitor_thread = threading.Thread(target=monitor_threads, daemon=True)
    monitor_thread.start()
    
    logger.info("启动多爬虫Web应用...")
    logger.info("访问 http://localhost:5000 查看前端界面")
    
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader — not
    # suitable for production; confirm this is development-only.
    socketio.run(app, host='0.0.0.0', port=5000, debug=True)