""" 爬虫工作器，用于控制爬虫的执行和管理爬虫进程 """
import os
import sys
import time
import multiprocessing
from datetime import datetime
import signal
import subprocess
import platform  # 添加平台检测模块
import threading

# Detect the current operating system once at import time.
IS_WINDOWS = platform.system() == 'Windows'

# Dynamically import os.setsid, which only exists on POSIX systems;
# it stays None on Windows or if the import fails.
setsid = None
if not IS_WINDOWS:
    try:
        from os import setsid
    except ImportError:
        pass
# Portable signal numbers: fall back to the conventional numeric values
# (SIGTERM=15, SIGKILL=9) on platforms where the constants are missing.
signal_constants = {
    'SIGTERM': getattr(signal, 'SIGTERM', 15),
    'SIGKILL': getattr(signal, 'SIGKILL', 9) if not IS_WINDOWS else 9
}

from fin_senti_entity_platform.data_collection.distributed_spider.url_manager import URLManager
from fin_senti_entity_platform.utils.config_loader import config_loader
from fin_senti_entity_platform.utils.logger import Logger

# Module-level logger; all worker activity is written to data_collection.log.
logger = Logger.get_logger('crawler_worker', 'data_collection.log')


class ProcessStrategy:
    """Abstract base class for OS-specific process-management strategies."""

    def start_process(self, cmd, log_file):
        """Launch *cmd* with output redirected to *log_file*; subclasses must override."""
        raise NotImplementedError

    def terminate_process(self, process):
        """Gracefully stop *process*; subclasses must override."""
        raise NotImplementedError

    def kill_process(self, process):
        """Forcefully kill *process*; subclasses must override."""
        raise NotImplementedError


class WindowsProcessStrategy(ProcessStrategy):
    """Process management for Windows hosts (no POSIX process groups)."""

    def start_process(self, cmd, log_file):
        """Run *cmd* through the shell, folding stderr into *log_file*."""
        popen_kwargs = {
            "shell": True,
            "stdout": log_file,
            "stderr": subprocess.STDOUT,
        }
        return subprocess.Popen(cmd, **popen_kwargs)

    def terminate_process(self, process):
        """Ask the process to exit (Windows TerminateProcess under the hood)."""
        logger.info(f"在Windows系统上终止进程 {process.pid}")
        process.terminate()

    def kill_process(self, process):
        """Forcefully kill the process."""
        logger.warning(f"在Windows系统上强制终止进程 {process.pid}")
        process.kill()


class UnixProcessStrategy(ProcessStrategy):
    """Process management for Unix/Linux hosts.

    When ``setsid`` is available the spider is started in its own session,
    so the whole process group can be signalled at shutdown.
    """

    def start_process(self, cmd, log_file):
        """Run *cmd* through the shell, in a new session when possible."""
        popen_kwargs = {
            "shell": True,
            "stdout": log_file,
            "stderr": subprocess.STDOUT,
        }
        if setsid:
            # Detach into a new session so all children share one process group.
            popen_kwargs["preexec_fn"] = setsid
        else:
            # Fallback when setsid could not be imported on this platform.
            logger.warning("setsid不可用，使用普通方式启动进程")
        return subprocess.Popen(cmd, **popen_kwargs)

    def terminate_process(self, process):
        """Gracefully terminate the whole process group (SIGTERM)."""
        self._signal_group(
            process,
            signal_constants['SIGTERM'],
            logger.info,
            "在Unix/Linux系统上终止进程组 {}",
            "无法终止进程组，尝试单独终止进程",
            process.terminate,
        )

    def kill_process(self, process):
        """Forcefully kill the whole process group (SIGKILL)."""
        self._signal_group(
            process,
            signal_constants['SIGKILL'],
            logger.warning,
            "在Unix/Linux系统上强制终止进程组 {}",
            "无法强制终止进程组，尝试单独强制终止进程",
            process.kill,
        )

    def _signal_group(self, process, signum, log_fn, group_msg, fallback_msg, fallback):
        """Send *signum* to the process group, degrading to *fallback* on the single process."""
        if not IS_WINDOWS and hasattr(os, 'getpgid') and hasattr(os, 'killpg'):
            try:
                log_fn(group_msg.format(os.getpgid(process.pid)))
                os.killpg(os.getpgid(process.pid), signum)
            except (AttributeError, OSError):
                # Degrade to signalling just the one process.
                logger.warning(fallback_msg)
                fallback()
        else:
            fallback()


class CrawlerWorker:
    """
    Crawler worker that owns and supervises a single spider subprocess.

    Supports a local mode (plain ``scrapy crawl``) and a distributed mode
    in which the spider pulls its tasks from a Redis-backed URL queue.
    """

    def __init__(self, worker_id, spider_class, use_distributed=False):
        """
        Initialize the crawler worker.

        :param worker_id: identifier of this worker
        :param spider_class: spider class to run; its ``name`` attribute is
            passed to ``scrapy crawl``
        :param use_distributed: whether to run in distributed (Redis) mode
        """
        self.worker_id = worker_id
        self.spider_class = spider_class
        self.spider_name = spider_class.name
        self.use_distributed = use_distributed
        self.process = None    # Popen handle of the running spider, if any
        self.status = 'idle'   # one of: idle, running, stopped, error
        self.start_time = None
        self.end_time = None
        self.log_file = None   # open file object receiving the spider's output
        self.url_manager = URLManager(use_redis=use_distributed)

        # Load per-spider worker configuration.
        self.config = config_loader.get('data_collection', {}).get('workers', {}).get(self.spider_name, {})
        self.max_processes = self.config.get('max_processes', multiprocessing.cpu_count())
        self.max_running_time = self.config.get('max_running_time', 3600)  # default: run at most 1 hour
        self.log_dir = self.config.get('log_dir', 'logs')

        # Choose the process-management strategy for the current OS.
        self.process_strategy = WindowsProcessStrategy() if IS_WINDOWS else UnixProcessStrategy()

        # Register signal handlers so Ctrl-C / SIGTERM stop the worker cleanly.
        self._register_signal_handlers()

        logger.info(f"爬虫工作器 {worker_id} 初始化完成，目标爬虫: {self.spider_name}")
        logger.info(f"当前操作系统: {'Windows' if IS_WINDOWS else 'Unix/Linux'}")

    def _register_signal_handlers(self):
        """Install SIGINT/SIGTERM handlers that stop the worker and exit."""
        def signal_handler(sig, frame):
            logger.info(f"接收到信号 {sig}，正在停止工作器 {self.worker_id}")
            self.stop()
            sys.exit(0)

        # Register defensively: signal.signal raises ValueError outside the
        # main thread and for signals a platform does not support.
        try:
            signal.signal(signal.SIGINT, signal_handler)
            signal.signal(signal.SIGTERM, signal_handler)
        except ValueError as e:
            logger.warning(f"在当前平台上无法注册某些信号处理器: {str(e)}")

    def start(self):
        """
        Start the crawler worker.

        :return: True if the spider process was launched, False otherwise
        """
        if self.status == 'running':
            logger.warning(f"工作器 {self.worker_id} 已经在运行中")
            return False

        try:
            self.status = 'running'
            self.start_time = datetime.now()

            # Launch according to the selected mode.
            if self.use_distributed:
                self._start_distributed_crawler()
            else:
                self._start_local_crawler()

            # Fix: _create_and_start_process swallows launch errors and sets
            # status='error'; previously start() still reported success here.
            if self.status != 'running' or self.process is None:
                return False

            logger.info(f"爬虫工作器 {self.worker_id} 已启动")
            return True
        except Exception as e:
            logger.error(f"启动爬虫工作器 {self.worker_id} 失败: {str(e)}")
            self.status = 'error'
            # Make sure the log file handle is released on failure.
            self._cleanup_resources()
            return False

    def _start_local_crawler(self):
        """Launch the spider in local mode via the Scrapy command line."""
        cmd = f"scrapy crawl {self.spider_name} -a worker_id={self.worker_id}"
        self._create_and_start_process(cmd, f"{self.spider_name}_worker_{self.worker_id}")
        # NOTE(review): unlike distributed mode, local runs do not start
        # _monitor_process — confirm whether that is intentional.

    def _start_distributed_crawler(self):
        """Launch the spider in distributed mode (it pulls tasks from Redis)."""
        cmd = f"scrapy crawl {self.spider_name} -a worker_id={self.worker_id} -a distributed=True"
        self._create_and_start_process(cmd, f"{self.spider_name}_distributed_worker_{self.worker_id}")
        # Supervise exit code and running-time limit in a daemon thread.
        self._monitor_process()

    def _create_and_start_process(self, cmd, log_file_prefix):
        """
        Create a timestamped log file and launch *cmd* through the
        OS-specific process strategy.

        On failure the worker is put into the 'error' state and any
        half-opened resources are released.

        :param cmd: shell command to execute
        :param log_file_prefix: prefix for the log file name
        """
        try:
            # Ensure the log directory exists.
            os.makedirs(self.log_dir, exist_ok=True)

            log_file_name = f"{log_file_prefix}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
            log_file_path = os.path.join(self.log_dir, log_file_name)

            logger.info(f"执行命令: {cmd}，日志输出到 {log_file_path}")

            # Open the log file and hand it to the strategy as the child's stdout/stderr.
            self.log_file = open(log_file_path, 'w', encoding='utf-8')
            self.process = self.process_strategy.start_process(cmd, self.log_file)

        except FileNotFoundError:
            logger.error(f"找不到命令或文件: {cmd}")
            self.status = 'error'
            self._cleanup_resources()
        except PermissionError:
            logger.error(f"没有权限执行命令或访问文件: {cmd}")
            self.status = 'error'
            self._cleanup_resources()
        except Exception as e:
            logger.error(f"启动进程失败: {str(e)}")
            self.status = 'error'
            self._cleanup_resources()

    def _monitor_process(self):
        """Start a daemon thread that watches the spider process."""
        def monitor():
            while self.status == 'running':
                if self.process is None:
                    self.status = 'stopped'
                    break

                try:
                    if self.process.poll() is not None:
                        # Process exited on its own; record how.
                        exit_code = self.process.returncode
                        if exit_code == 0:
                            logger.info(f"爬虫进程正常结束，退出码: {exit_code}")
                            self.status = 'stopped'
                        else:
                            logger.error(f"爬虫进程异常结束，退出码: {exit_code}")
                            self.status = 'error'
                        break

                    # Enforce the running-time limit (<= 0 disables it).
                    # Fix: running_time is computed and compared only when
                    # start_time is set; previously the comparison could hit
                    # an unbound local when start_time was None.
                    if self.max_running_time > 0 and self.start_time is not None:
                        running_time = (datetime.now() - self.start_time).total_seconds()
                        if running_time > self.max_running_time:
                            logger.info(f"爬虫运行时间已超过最大限制 {self.max_running_time}s，正在停止")
                            self.stop()
                            break

                except Exception as e:
                    logger.error(f"监控进程时发生错误: {str(e)}")
                    self.status = 'error'
                    break

                # Poll every 10 seconds.
                time.sleep(10)

            self.end_time = datetime.now()
            self._cleanup_resources()

        monitor_thread = threading.Thread(target=monitor, daemon=True)
        monitor_thread.start()

    def _cleanup_resources(self):
        """Close the spider's log file; tolerates a missing or broken handle."""
        try:
            if self.log_file:
                self.log_file.close()
                self.log_file = None
        except Exception as e:
            logger.warning(f"关闭日志文件时发生错误: {str(e)}")

    def stop(self):
        """
        Stop the crawler worker: terminate gracefully, then force-kill
        if the process has not exited within 30 seconds.

        :return: True on success, False otherwise
        """
        if self.status != 'running':
            logger.warning(f"工作器 {self.worker_id} 不在运行状态")
            return False

        try:
            if self.process is not None and self.process.poll() is None:
                try:
                    # Graceful termination first.
                    self.process_strategy.terminate_process(self.process)

                    try:
                        # Wait up to 30 seconds for a clean exit (replaces
                        # the previous 1-second polling busy loop).
                        self.process.wait(timeout=30)
                    except subprocess.TimeoutExpired:
                        logger.warning("进程未能正常终止，正在强制终止")
                        self.process_strategy.kill_process(self.process)

                except Exception as e:
                    logger.error(f"终止进程时发生错误: {str(e)}")
                    # Last-resort fallback; ignore any further errors
                    # (fix: narrowed the previous bare except).
                    try:
                        if IS_WINDOWS:
                            self.process.kill()
                        else:
                            self.process.terminate()
                    except Exception:
                        pass

            self.status = 'stopped'
            self.end_time = datetime.now()
            self._cleanup_resources()
            logger.info(f"爬虫工作器 {self.worker_id} 已停止")
            return True
        except Exception as e:
            logger.error(f"停止爬虫工作器 {self.worker_id} 失败: {str(e)}")
            self.status = 'error'
            self._cleanup_resources()
            return False

    def get_status(self):
        """
        Return a status snapshot of this worker.

        :return: dict with worker id, spider name, state, mode, OS type and,
            when available, start/end timestamps and running time in seconds
        """
        status_info = {
            'worker_id': self.worker_id,
            'spider_name': self.spider_name,
            'status': self.status,
            'use_distributed': self.use_distributed,
            'os_type': 'Windows' if IS_WINDOWS else 'Unix/Linux'
        }

        if self.start_time:
            status_info['start_time'] = self.start_time.isoformat()

        if self.end_time:
            status_info['end_time'] = self.end_time.isoformat()

        # Fix: compute running_time only when start_time is known, so a
        # worker with end_time but no start_time no longer raises TypeError.
        if self.start_time:
            reference = self.end_time if self.end_time else datetime.now()
            status_info['running_time'] = (reference - self.start_time).total_seconds()

        return status_info

    def add_urls(self, urls):
        """
        Feed URLs to the URL manager.

        :param urls: list of URLs to enqueue
        :return: number of URLs actually added
        """
        return self.url_manager.add_new_urls(urls)