# -*- coding: utf-8 -*-
"""IP端口扫描器模块
实现多进程多线程的TCP和UDP端口扫描功能
"""
import threading
import multiprocessing
import time
from datetime import datetime
import queue
from concurrent.futures import ThreadPoolExecutor
import logging
import copy
import signal
from src.core.utils import *
import traceback
import pymysql

# 配置日志
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger('IPScanner')

class MultiProcessThreadHandler:
    """Generic multi-process / multi-thread work dispatcher.

    Splits an input list into chunks that are processed by parallel worker
    processes; inside each process a thread pool handles individual items.

    Features:
    - chunked multi-process fan-out with per-process thread pools
    - network-connectivity detection with automatic wait-until-online
    - pause / resume support via a shared event
    - Ctrl+C interruption with graceful shutdown
    - result-handling callback mechanism

    Args:
        db_module_path: dotted path of the database-connector module, optional.
        max_processes: maximum worker processes (capped at CPU count), default 4.
        threads_per_process: threads per worker process, default 10.
        timeout: per-item processing timeout in seconds, default 2.
        retry_count: retries on item failure, default 1.
        network_check_ip: IP probed for connectivity checks, default 8.8.8.8.
        network_check_interval: seconds between connectivity probes, default 1.

    Example:
        # Function processing a single item
        def process_item(item, params):
            return {'item': item, 'result': f'Processed {item}'}

        # Result callback
        def handle_result(result):
            print(f'Received result: {result}')

        handler = MultiProcessThreadHandler(max_processes=2, threads_per_process=5)
        handler.process_data([1, 2, 3, 4, 5], process_item, handle_result)
    """

    def __init__(self, db_module_path='src.core.database', max_processes=4,
                 threads_per_process=10, timeout=2, retry_count=1,
                 network_check_ip='8.8.8.8', network_check_interval=1):
        # Process/thread configuration; never exceed the machine's CPU count.
        self.max_processes = min(max_processes, multiprocessing.cpu_count())
        self.threads_per_process = threads_per_process

        # Processing configuration.
        self.timeout = timeout
        self.retry_count = retry_count

        # Database configuration (connection is created lazily in process_data).
        self.db_module_path = db_module_path
        self.db_connector = None

        # Network monitoring state.
        self.network_check_ip = network_check_ip
        self.network_check_interval = network_check_interval
        self.network_online = threading.Event()  # set == network reachable
        self.network_online.clear()  # start offline; probed before processing
        self.network_monitor_thread = None  # background monitor thread
        self._network_monitor_running = False  # monitor loop flag

        # Pause control (set == paused).
        self.pause = threading.Event()
        self.pause.clear()

        # Result handling.
        self.results_queue = multiprocessing.Queue()  # shared across processes
        self._result_queue_running = False  # result-drain loop flag

    def _start_network_monitor(self):
        """Start the background thread that tracks network reachability."""
        def monitor_network():
            while self._network_monitor_running:
                try:
                    # Probe twice to smooth over a single lost ping.
                    is_online = False
                    for _ in range(2):
                        if is_ip_online(self.network_check_ip):
                            is_online = True
                            break
                    current_state = self.network_online.is_set()

                    if is_online and not current_state:
                        # Offline -> online transition.
                        self.network_online.set()
                        logger.info("网络连接恢复")
                    elif not is_online and current_state:
                        # Online -> offline transition.
                        self.network_online.clear()
                        logger.warning("网络连接断开，请检查网络连接状态")
                    # No state change: nothing to do.
                except Exception as e:
                    # BUG FIX: the original code had a second `except Exception`
                    # clause on the same try that was unreachable; merged into
                    # one handler that logs, forces the online flag so workers
                    # don't hang, and backs off briefly.
                    logger.error(f"更新网络状态异常: {e}")
                    try:
                        self.network_online.set()
                    except Exception:
                        pass
                    # Brief pause so a persistent error cannot spin the loop.
                    time.sleep(1)

                # Wait before the next probe.
                time.sleep(self.network_check_interval)

        self._network_monitor_running = True
        self.network_monitor_thread = threading.Thread(target=monitor_network)
        self.network_monitor_thread.daemon = True  # exits with the main program
        self.network_monitor_thread.start()

    def _stop_network_monitor(self):
        """Stop the network-monitor thread (best effort, 1s grace period)."""
        self._network_monitor_running = False
        if self.network_monitor_thread:
            try:
                self.network_monitor_thread.join(timeout=1.0)
            except Exception as e:
                logger.error(f"停止网络监控线程异常: {e}")

    def _terminate_processes(self, processes):
        """Terminate all worker processes and release resources."""
        logger.info("正在终止处理进程并清理资源")
        # Kill any still-running workers.
        for p in processes:
            if p.is_alive():
                p.terminate()
        # Reap them, waiting at most 5 seconds each.
        for p in processes:
            p.join(timeout=5)
        logger.info("所有处理进程已终止")
        # Signal the result-drain thread to exit.
        self._result_queue_running = False
        self._stop_network_monitor()
        # Close the main-process database connection, if any.
        if self.db_connector:
            try:
                self.db_connector.disconnect()
            except Exception:
                pass
        logger.info("所有资源已清理")

    def process_data(self, data_list, process_func, result_handler=None):
        """Process a data list with multi-process / multi-thread fan-out.

        Args:
            data_list: items to process.
            process_func: callable(item, params) executed per item in workers.
            result_handler: optional callable(result) run in the main process.
        """
        if not data_list:
            print("没有需要处理的数据")
            return

        # Probe connectivity once up front; abort if offline.
        if is_ip_online(self.network_check_ip):
            self.network_online.set()
        else:
            logger.error("检测到网络未连接，如果为非公网环境，请通过参数 `--network-check-ip` 指定网络检测地址")
            return
        self._start_network_monitor()

        # Ensure the main process has a database connection (if configured).
        if self.db_module_path and not self.db_connector:
            try:
                # Dynamically import the DatabaseConnector class.
                module_path, class_name = self.db_module_path.rsplit('.', 1)
                module = __import__(module_path, fromlist=[class_name])
                db_connector_class = getattr(module, class_name)
                self.db_connector = db_connector_class()
                self.db_connector.connect()
            except Exception as e:
                logger.error(f"创建数据库连接失败: {e}")
                return

        logger.info(f"开始处理 {len(data_list)} 个数据项")
        logger.info("处理开始...\n")

        # Start the result-draining thread if a handler was supplied.
        result_thread = None
        if result_handler:
            self._result_queue_running = True
            result_thread = threading.Thread(target=self._process_results, args=(result_handler,))
            result_thread.daemon = True
            result_thread.start()

        start_time = time.time()

        # Split the data so each process gets one chunk.
        chunk_size = max(1, len(data_list) // self.max_processes)
        data_chunks = [data_list[i:i + chunk_size] for i in range(0, len(data_list), chunk_size)]

        # Picklable parameters handed to each worker process.
        process_params = {
            'timeout': self.timeout,
            'threads_per_process': self.threads_per_process,
            'retry_count': self.retry_count
        }

        processes = []
        for chunk in data_chunks:
            p = multiprocessing.Process(
                target=_process_data_chunk_wrapper,
                args=(chunk, copy.deepcopy(process_params), process_func, self.results_queue, self.pause, self.network_online)
            )
            p.daemon = True  # die automatically with the main process
            processes.append(p)
            p.start()

        # Wait for completion while remaining interruptible with Ctrl+C.
        while True:
            try:
                alive = [p for p in processes if p.is_alive()]
                if not alive:
                    break
                time.sleep(1)
            except KeyboardInterrupt:
                # Ctrl+C: pause workers and ask whether to quit.
                logger.warning("处理被中断")
                self.pause.set()
                try:
                    user_input = input("是否要退出处理？(y/n): ").strip().lower()
                    if user_input == 'y' or user_input == 'yes':
                        self._terminate_processes(processes)
                        return  # early exit
                    else:
                        self.pause.clear()
                        logger.info("用户取消退出，继续处理")
                        # Back to the main loop; a further Ctrl+C is caught again.
                        continue
                except KeyboardInterrupt:
                    # Second Ctrl+C: force quit.
                    logger.warning("用户再次按下Ctrl+C，强制退出")
                    self._terminate_processes(processes)
                    logger.info("已强制退出处理")
                    return
                except Exception as e:
                    logger.error(f"处理中断请求时出现异常: {e}")
                    self._terminate_processes(processes)
                    logger.info("已强制退出处理")
                    return

        end_time = time.time()
        logger.info(f"所有数据处理完成，共耗时 {end_time - start_time:.2f} 秒")
        self._terminate_processes(processes)
        logger.info("处理结束")

    def _process_results(self, result_handler):
        """Drain the shared result queue, invoking the user callback per item."""
        while True:
            try:
                # 1s timeout lets the loop notice shutdown periodically.
                result = self.results_queue.get(timeout=1)
                if result_handler:
                    result_handler(result)
            except queue.Empty:
                if not self._result_queue_running:
                    break
            except Exception as e:
                logger.error(f"处理结果异常: {e}")


class IPScanner:
    """IP port scanner: multi-process, multi-thread TCP and UDP scanning."""

    def __init__(self, db_config=None, tcp_ports=None, udp_ports=None, show_port_config=True):
        """Initialize the scanner.

        Args:
            db_config: database configuration dict, used by worker processes
                to create their own connections.
            tcp_ports: TCP ports to scan; defaults to a common-port list.
            udp_ports: UDP ports to scan; defaults to a common-port list.
            show_port_config: whether to display the port configuration,
                default True (kept for interface compatibility; not read here).
        """
        # Store the configuration rather than a live connection object so the
        # instance stays usable with multiprocessing.
        self.db_config = db_config

        # Default port lists.
        self.default_tcp_ports = [21, 22, 23, 25, 53, 80, 110, 143, 443, 465, 587, 993, 995, 1723, 3306, 3389, 5432, 8080, 8443]
        self.default_udp_ports = [53, 67, 68, 69, 123, 161, 162, 443]

        # Effective ports to scan.
        self.tcp_ports = tcp_ports if tcp_ports else self.default_tcp_ports
        self.udp_ports = udp_ports if udp_ports else self.default_udp_ports

        # Scan parameters.
        self.timeout = 1  # connect timeout (seconds)
        self.max_processes = multiprocessing.cpu_count()  # worker processes
        self.threads_per_process = 100  # threads per worker
        self.scan_speed = 100  # probes per second
        self.retry_count = 2  # retries per port

        # Main-process database connection (created lazily in scan_ips).
        self.db_connector = None

        # Result queue shared across processes.
        self._result_queue_running = True
        self.results_queue = multiprocessing.Queue()

        # Manager providing cross-process shared primitives.
        self.manager = multiprocessing.Manager()

        # Dotted path used by worker processes to build their own DB connection.
        self.db_module_path = 'src.core.database.DatabaseConnector'

        # Network monitoring state.
        self.network_check_ip = '8.8.8.8'  # default probe target (Google DNS)
        self.network_check_interval = 5  # seconds between probes
        self.network_online = self.manager.Event()  # set == network reachable
        self.network_online.clear()  # start offline; probed when scanning starts
        # Pause flag (set == paused).
        self.pause = self.manager.Event()
        self.pause.clear()
        self.network_monitor_thread = None  # background monitor thread
        self.check_alive_enabled = False  # probe target liveness before scanning?
        self._network_monitor_running = False  # monitor loop flag
        self._main_process_id = None  # PID of the process that owns the monitor

    def _start_network_monitor(self):
        """Start the network-monitor thread (main process only, once)."""
        # Refuse to start twice, or from a process other than the one that
        # first started it.
        if self._network_monitor_running or (self._main_process_id is not None and self._main_process_id != multiprocessing.current_process().pid):
            return

        # Remember which process owns the monitor.
        self._main_process_id = multiprocessing.current_process().pid

        def monitor_network():
            while self._network_monitor_running:
                try:
                    is_online = is_ip_online(self.network_check_ip)

                    # Update the shared flag, tolerating manager-proxy errors.
                    try:
                        if is_online and not self.network_online.is_set():
                            # Offline -> online transition.
                            self.network_online.set()
                            logger.info(f"网络已恢复，继续扫描任务")
                        elif not is_online and self.network_online.is_set():
                            # Online -> offline transition.
                            self.network_online.clear()
                            logger.warning(f"网络连接断开，请检查网络连接状态")
                    except Exception as status_error:
                        logger.error(f"更新网络状态异常: {status_error}")
                        # On error, force the online flag so workers don't hang.
                        try:
                            self.network_online.set()
                        except Exception:
                            pass
                except Exception as e:
                    logger.error(f"网络监控线程异常: {e}")
                    # Brief pause so a persistent error cannot spin the loop.
                    time.sleep(1)

                # Wait before the next probe.
                time.sleep(self.network_check_interval)

        self._network_monitor_running = True
        self.network_monitor_thread = threading.Thread(target=monitor_network)
        self.network_monitor_thread.daemon = True  # exits with the main program
        self.network_monitor_thread.start()

    def _stop_network_monitor(self):
        """Stop the network-monitor thread (best effort, 1s grace period)."""
        self._network_monitor_running = False
        if self.network_monitor_thread:
            try:
                self.network_monitor_thread.join(timeout=1.0)
            except Exception as e:
                logger.error(f"停止网络监控线程异常: {e}")

    def scan_ips(self, ips):
        """Scan the ports of multiple IP addresses.

        Args:
            ips: list of dicts, each containing at least 'ip' and 'ip_id'.
        """
        if not ips:
            print("没有需要扫描的IP地址")
            return

        # Probe connectivity once up front; abort if offline.
        if is_ip_online(self.network_check_ip):
            self.network_online.set()
        else:
            logger.error("检测到网络未连接，如果为非公网环境，请通过参数 “--network-check-ip” 指定网络检测地址")
            return
        self._start_network_monitor()

        # Ensure the main process has a database connection.
        if not self.db_connector:
            try:
                # Dynamically import the DatabaseConnector class.
                module_path, class_name = self.db_module_path.rsplit('.', 1)
                module = __import__(module_path, fromlist=[class_name])
                db_connector_class = getattr(module, class_name)
                self.db_connector = db_connector_class()
                self.db_connector.connect()
            except Exception as e:
                logger.error(f"创建数据库连接失败: {e}")
                return

        logger.info(f"开始扫描 {len(ips)} 个IP地址")
        logger.info(f"扫描 {len(self.tcp_ports)} 个TCP端口")
        logger.info(f"扫描 {len(self.udp_ports)} 个UDP端口")
        logger.info("扫描开始...\n")

        # Start the thread that records results into the database.
        result_thread = threading.Thread(target=self._process_results)
        result_thread.daemon = True
        result_thread.start()

        start_time = time.time()

        # Split the IP list so each process gets one chunk.
        chunk_size = max(1, len(ips) // self.max_processes)
        ip_chunks = [ips[i:i + chunk_size] for i in range(0, len(ips), chunk_size)]

        # Picklable parameters handed to each worker process.
        scan_params = {
            'timeout': self.timeout,
            'threads_per_process': self.threads_per_process,
            'scan_speed': self.scan_speed,
            'retry_count': self.retry_count,
            'tcp_ports': self.tcp_ports,
            'udp_ports': self.udp_ports,
            'check_alive': self.check_alive_enabled,
            'network_check_ip': self.network_check_ip
        }

        processes = []
        for chunk in ip_chunks:
            p = multiprocessing.Process(
                target=_process_scan_chunk_wrapper,
                args=(chunk, copy.deepcopy(scan_params), self.db_module_path, self.results_queue, self.pause, self.network_online, self.check_alive_enabled)
            )
            p.daemon = True  # die automatically with the main process
            processes.append(p)
            p.start()

        # Wait for completion while remaining interruptible with Ctrl+C.
        while True:
            try:
                alive = [p for p in processes if p.is_alive()]
                if not alive:
                    break
                time.sleep(1)
            except KeyboardInterrupt:
                # Ctrl+C: pause workers and ask whether to quit.
                logger.warning("扫描被中断")
                self.pause.set()
                try:
                    user_input = input("是否要退出扫描？(y/n): ").strip().lower()
                    if user_input == 'y' or user_input == 'yes':
                        self._terminate_scan(processes)
                        return  # early exit
                    else:
                        self.pause.clear()
                        logger.info("用户取消退出，继续扫描")
                        # Back to the main loop; a further Ctrl+C is caught again.
                        continue
                except KeyboardInterrupt:
                    # Second Ctrl+C: force quit.
                    logger.warning("用户再次按下Ctrl+C，强制退出")
                    self._terminate_scan(processes)
                    logger.info("已强制退出扫描")
                    return
                except Exception as e:
                    logger.error(f"处理中断请求时出现异常: {e}")
                    self._terminate_scan(processes)
                    logger.info("已强制退出扫描")
                    return

        end_time = time.time()
        logger.info(f"所有IP扫描完成，共耗时 {end_time - start_time:.2f} 秒")
        self._terminate_scan(processes)
        logger.info("扫描结束")

    def _terminate_scan(self, processes):
        """Terminate scan worker processes and release resources.

        BUG FIX: the original version stopped the network monitor and closed
        the database connection twice; the duplicate cleanup was removed.
        """
        logger.info("正在终止扫描进程并清理资源")
        # Kill any still-running workers.
        for p in processes:
            if p.is_alive():
                p.terminate()
        # Reap them, waiting at most 5 seconds each.
        for p in processes:
            p.join(timeout=5)
        logger.info("所有扫描进程已终止")
        # Signal the result-drain thread to exit.
        self._result_queue_running = False
        self._stop_network_monitor()
        # Close the main-process database connection, if any.
        if self.db_connector:
            try:
                self.db_connector.disconnect()
            except Exception:
                pass
        logger.info("所有资源已清理")

    def _scan_tcp_ports(self, ip, ip_id):
        """Scan TCP ports of one IP using a thread pool.

        Args:
            ip: IP address.
            ip_id: database ID of the IP.
        """
        with ThreadPoolExecutor(max_workers=self.threads_per_process) as executor:
            futures = []
            for port in self.tcp_ports:
                # BUG FIX: _scan_tcp_port_subprocess takes 7 positional args;
                # the original call omitted pause and network_online, which
                # raised TypeError at runtime.
                futures.append(executor.submit(_scan_tcp_port_subprocess, ip, port, ip_id, {
                    'timeout': self.timeout,
                    'scan_speed': self.scan_speed,
                    'retry_count': self.retry_count
                }, self.results_queue, self.pause, self.network_online))

            # Wait for all scans, logging (not propagating) per-port failures.
            for future in futures:
                try:
                    future.result()
                except Exception as e:
                    logger.error(f"TCP端口扫描异常: {e}")

    def _scan_udp_ports(self, ip, ip_id):
        """Scan UDP ports of one IP using a thread pool.

        Args:
            ip: IP address.
            ip_id: database ID of the IP.
        """
        with ThreadPoolExecutor(max_workers=self.threads_per_process) as executor:
            futures = []
            for port in self.udp_ports:
                # BUG FIX: pass pause and network_online to match the
                # 7-argument signature of _scan_udp_port_subprocess.
                futures.append(executor.submit(_scan_udp_port_subprocess, ip, port, ip_id, {
                    'timeout': self.timeout,
                    'scan_speed': self.scan_speed,
                    'retry_count': self.retry_count
                }, self.results_queue, self.pause, self.network_online))

            # Wait for all scans, logging (not propagating) per-port failures.
            for future in futures:
                try:
                    future.result()
                except Exception as e:
                    logger.error(f"UDP端口扫描异常: {e}")

    def _process_results(self):
        """Drain the result queue, recording open ports into the database."""
        while True:
            try:
                # 1s timeout lets the loop notice shutdown periodically.
                result = self.results_queue.get(timeout=1)
                self._record_asset(result)
            except queue.Empty:
                if not self._result_queue_running:
                    break
            except Exception as e:
                logger.error(f"处理扫描结果异常: {e}")

    def _record_asset(self, result):
        """Record one open-port asset in the database (insert or update).

        Args:
            result: scan-result dict with ip, port, protocol, service, ip_id.
        """
        max_retries = 3
        retry_count = 0
        success = False

        while retry_count < max_retries and not success:
            try:
                ip = result['ip']
                port = result['port']
                protocol = result['protocol']
                service = result['service']

                # Reconnect if the connection was dropped.
                if not self.db_connector.connection or not self.db_connector.connection.open:
                    logger.debug(f"数据库连接已关闭，尝试重新连接...")
                    self.db_connector.connect()
                    if not self.db_connector.connection:
                        logger.error(f"无法重新连接到数据库，等待重试...")
                        retry_count += 1
                        time.sleep(1)
                        continue

                # Does the asset already exist?
                existing_asset = self.db_connector.fetchone(
                    "SELECT * FROM assets WHERE ip = %s AND port = %s AND asset_protocol = %s",
                    (ip, port, protocol)
                )

                if existing_asset:
                    # Update the existing asset.
                    sql = """
                    UPDATE assets 
                    SET asset_fingerprint = %s, last_check_time = %s, update_time = %s 
                    WHERE asset_id = %s
                    """
                    self.db_connector.execute_query(
                        sql,
                        (service, datetime.now(), datetime.now(), existing_asset['asset_id'])
                    )
                    logger.info(f"已更新资产: {ip}:{port}/{protocol} ({service})")
                else:
                    # Insert a new asset.
                    sql = """
                    INSERT INTO assets (ip, port, asset_protocol, asset_fingerprint, is_valid, last_check_time, create_time, update_time) 
                    VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
                    """
                    self.db_connector.execute_query(
                        sql,
                        (ip, port, protocol, service, 1, datetime.now(), datetime.now(), datetime.now())
                    )
                    logger.info(f"已添加资产: {ip}:{port}/{protocol} ({service})")

                success = True
            except pymysql.MySQLError as e:
                # BUG FIX: e.args is usually (errno, message) but may be a
                # single value; a strict 2-tuple unpack raised ValueError here.
                error_code = e.args[0] if e.args else 0
                logger.error(f"数据库操作异常: {e}")

                # Connection-related error codes: reconnect and retry.
                if error_code in (2006, 2013, 2055, 2014):
                    # MySQL server has gone away / lost connection / commands out of sync
                    retry_count += 1
                    logger.info(f"连接错误，尝试重新连接 ({retry_count}/{max_retries})...")
                    self.db_connector.disconnect()
                    try:
                        self.db_connector.connect()
                    except Exception as connect_error:
                        logger.error(f"重新连接失败: {connect_error}")
                    time.sleep(1)
                else:
                    # Any other MySQL error: do not retry.
                    logger.error(f"MySQL错误，不重试: {e}")
                    break
            except Exception as e:
                logger.error(f"记录资产异常: {e}")
                traceback.print_exc()
                break

        if not success:
            logger.error(f"记录资产失败，已达到最大重试次数 ({max_retries}次)")

    def set_scan_params(self, timeout=None, max_processes=None, threads_per_process=None, scan_speed=None, retry_count=None, check_alive=False, network_check_ip=None, quiet=False):
        """Set scan parameters.

        Args:
            timeout: connect timeout (seconds).
            max_processes: maximum worker processes.
            threads_per_process: threads per worker process.
            scan_speed: probes per second.
            retry_count: retries per port.
            check_alive: probe target liveness before port scanning.
            network_check_ip: IP used for network connectivity checks.
            quiet: suppress the confirmation log line.
        """
        if timeout is not None:
            self.timeout = timeout
        if max_processes is not None:
            self.max_processes = max_processes
        if threads_per_process is not None:
            self.threads_per_process = threads_per_process
        if scan_speed is not None:
            self.scan_speed = scan_speed
        if retry_count is not None:
            self.retry_count = retry_count
        self.check_alive_enabled = check_alive
        if network_check_ip:
            self.network_check_ip = network_check_ip

        if not quiet:
            logger.info(f"已设置扫描参数: timeout={self.timeout}, max_processes={self.max_processes}, threads_per_process={self.threads_per_process}, scan_speed={self.scan_speed}, retry_count={self.retry_count}")

    def set_ports(self, tcp_ports=None, udp_ports=None, quiet=False):
        """Set the ports to scan.

        Args:
            tcp_ports: TCP port list.
            udp_ports: UDP port list.
            quiet: suppress the confirmation log line.
        """
        if tcp_ports is not None:
            self.tcp_ports = tcp_ports
        if udp_ports is not None:
            self.udp_ports = udp_ports

        if not quiet:
            logger.info(f"已设置扫描端口: TCP={self.tcp_ports}, UDP={self.udp_ports}")

    def set_network_check(self, check_ip=None, enable=True, check_interval=5):
        """Set network-check parameters.

        Args:
            check_ip: IP used for connectivity checks.
            enable: whether to enable the network check.
            check_interval: seconds between checks.
        """
        if check_ip:
            self.network_check_ip = check_ip
        # Both branches of the original code set the flag (disabled mode marks
        # the network online so scanning is never blocked), so set it
        # unconditionally.
        self.network_online.set()
        self.network_check_interval = check_interval

        logger.info(f"已设置网络检查参数: check_ip={self.network_check_ip}, interval={self.network_check_interval}s")

    def set_check_alive(self, enable=True):
        """Enable or disable target-IP liveness probing.

        Args:
            enable: whether to probe liveness before port scanning.
        """
        self.check_alive_enabled = enable
        logger.info(f"目标IP在线检测功能已{'启用' if enable else '禁用'}")

def _process_scan_chunk_wrapper(ip_chunk, scan_params, db_module_path, results_queue, pause=None, network_online=None, check_alive_enabled=False):
    """Worker-process entry point: scan one chunk of IPs.

    Creates its own database connection (connection objects are not picklable),
    then scans each IP's TCP and UDP ports and marks the IP as scanned.

    Args:
        ip_chunk: list of dicts with 'ip' and 'ip_id'.
        scan_params: scan parameter dict (timeouts, port lists, ...).
        db_module_path: dotted path of the database-connector class.
        results_queue: cross-process queue receiving open-port results.
        pause: shared pause event (set == paused).
        network_online: shared network-online event.
        check_alive_enabled: probe target liveness before port scanning.
    """
    try:
        # Ignore SIGINT in workers; the parent process handles Ctrl+C.
        import signal
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        # Dynamically import the DatabaseConnector class.
        module_path, class_name = db_module_path.rsplit('.', 1)
        module = __import__(module_path, fromlist=[class_name])
        db_connector_class = getattr(module, class_name)

        # Per-process database connection.
        db_connector = db_connector_class()
        db_connector.connect()

        for ip_info in ip_chunk:
            ip = ip_info['ip']
            ip_id = ip_info['ip_id']

            # Honor pause and wait for the network before scanning.
            wait_continue(pause)
            wait_network_online(network_online, f"网络离线，暂停探测 {ip}")
            logger.info(f"开始扫描IP: {ip}")

            # If this IP completed a previous scan, mark it as rescanning.
            try:
                ip_status = db_connector.fetchone("SELECT scan_status, port_scan_count FROM ips WHERE ip_id = %s", (ip_id,))
                previous_scan_status = ip_status.get('scan_status', 0) if ip_status else 0

                # scan_status 1 == previously completed; 2 == rescanning.
                if previous_scan_status == 1:
                    current_time = datetime.now()
                    db_connector.execute_query(
                        "UPDATE ips SET scan_status = 2, update_time = %s WHERE ip_id = %s",
                        (current_time, ip_id)
                    )
                    logger.info(f"IP {ip} 之前已扫描完成，现在再次进行扫描")
            except Exception as e:
                logger.error(f"检查IP {ip} 状态失败: {e}")

            scan_start_time = datetime.now()

            try:
                # Optional liveness probe: skip unreachable hosts.
                if check_alive_enabled:
                    if not is_ip_online(ip):
                        logger.info(f"IP {ip} 不可达，跳过端口扫描")
                        # Mark as scanned with zero ports.
                        _mark_ip_scanned_subprocess(
                            ip_id, scan_start_time, db_connector, ip, 0, scan_params
                        )
                        continue

                # TCP then UDP port scans.
                _scan_tcp_ports_subprocess(
                    ip, ip_id, scan_params, results_queue, pause, network_online
                )
                _scan_udp_ports_subprocess(
                    ip, ip_id, scan_params, results_queue, pause, network_online
                )

                # BUG FIX: total scanned ports is the TCP + UDP sum; the
                # original used max() of the two list lengths.
                total_scanned_ports = len(scan_params['tcp_ports']) + len(scan_params.get('udp_ports', []))

                # Mark the scan complete.
                _mark_ip_scanned_subprocess(
                    ip_id, scan_start_time, db_connector, ip, total_scanned_ports, scan_params
                )

            except Exception as e:
                logger.error(f"扫描IP {ip} 异常: {e}")

        db_connector.disconnect()
    except Exception as e:
        logger.error(f"子进程处理异常: {e}")
        # Append to the failure log (explicit UTF-8 for the Chinese text).
        with open('scan_failures.log', 'a', encoding='utf-8') as f:
            f.write(f"{datetime.now()} - 子进程异常: {e}\n")
            f.write(f"  受影响的IP块大小: {len(ip_chunk) if ip_chunk else 0}\n")


def _scan_tcp_ports_subprocess(ip, ip_id, scan_params, results_queue, pause, network_online):
    """Scan every configured TCP port of one IP with a thread pool.

    Args:
        ip: IP address.
        ip_id: database ID of the IP.
        scan_params: scan parameter dict ('tcp_ports', 'threads_per_process', ...).
        results_queue: queue receiving open-port results.
        pause: shared pause event.
        network_online: shared network-online event.
    """
    # Imported here because this runs inside a worker process.
    import concurrent.futures

    worker_count = scan_params.get('threads_per_process', 10)
    with concurrent.futures.ThreadPoolExecutor(max_workers=worker_count) as pool:
        # One task per TCP port.
        pending = [
            pool.submit(_scan_tcp_port_subprocess, ip, port, ip_id,
                        scan_params, results_queue, pause, network_online)
            for port in scan_params['tcp_ports']
        ]

        # Collect results; a failing port never aborts the others.
        for task in concurrent.futures.as_completed(pending):
            try:
                task.result()
            except Exception as e:
                logger.error(f"TCP端口扫描任务异常: {e}")


def _scan_tcp_port_subprocess(ip, port, ip_id, scan_params, results_queue, pause, network_online):
    """Probe one TCP port on an IP, with rate limiting and retries.

    Puts a result dict on *results_queue* when the port is open; otherwise
    returns silently after exhausting the retries.

    Args:
        ip: target IP address
        port: TCP port number
        ip_id: database ID of the IP
        scan_params: parameter dict ('scan_speed', 'retry_count', 'timeout')
        results_queue: queue receiving open-port result dicts
        pause: pause event object
        network_online: network-online event object
    """
    # Rate limiting: the previous probe's timestamp is stored as an
    # attribute on the function object.
    # NOTE(review): this attribute is shared by all pool threads without a
    # lock, so the effective rate limit is approximate — confirm acceptable.
    previous = getattr(_scan_tcp_port_subprocess, 'last_scan_time', None)
    if previous:
        min_gap = 1.0 / scan_params['scan_speed']
        since_last = time.time() - previous
        if since_last < min_gap:
            time.sleep(min_gap - since_last)
    _scan_tcp_port_subprocess.last_scan_time = time.time()

    # One initial attempt plus scan_params['retry_count'] retries.
    total_tries = scan_params['retry_count'] + 1
    for attempt in range(total_tries):
        # Honour pause requests and wait out network outages before probing.
        wait_continue(pause)
        wait_network_online(network_online, f"网络离线，暂停探测 {ip}:{port}/tcp")

        if check_tcp_port(ip, port, scan_params['timeout']):
            # Port is open: report it and stop retrying.
            results_queue.put({
                'ip': ip,
                'port': port,
                'protocol': 'tcp',
                'service': get_service_name(port, 'tcp'),
                'ip_id': ip_id
            })
            break
        if attempt + 1 < total_tries:
            # Closed/filtered: brief back-off before the next attempt.
            time.sleep(0.1)

def wait_continue(pause_event):
    """Block while *pause_event* is set; return immediately otherwise.

    Args:
        pause_event: a threading/multiprocessing Event, or None (no-op).
    """
    # Poll once per second until the pause flag is cleared.
    while pause_event and pause_event.is_set():
        time.sleep(1)

def wait_network_online(network_online, outline_message=None, online_message=None):
    """Block until the network-online event is set.

    Returns immediately when the event is None or already set; otherwise
    polls once per second until it becomes set.

    Args:
        network_online: Event signalling network availability, or None.
        outline_message: optional message logged once when waiting starts.
        online_message: optional message logged once the network is back.
    """
    if not network_online or network_online.is_set():
        return
    if outline_message:
        logger.error(outline_message)
    # Deliberately unbounded wait: the requirement is to resume only once
    # connectivity returns, so no maximum wait time is applied.
    while not network_online.is_set():
        time.sleep(1)
    if online_message:
        logger.info(online_message)


def _process_data_chunk_wrapper(data_chunk, process_params, process_func, results_queue, pause, network_online):
    """Process one chunk of data items inside a worker subprocess.

    Runs *process_func* concurrently over the chunk with a thread pool;
    per-item failures are logged without aborting the remaining items.
    Fatal errors are appended to ``process_failures.log``.

    Args:
        data_chunk: list of items to process
        process_params: parameter dict (reads 'threads_per_process')
        process_func: callable(item, process_params) handling one item
        results_queue: queue receiving non-None results, may be None
        pause: pause event object
        network_online: network-online event object
    """
    try:
        # Subprocess-local imports. SIGINT is ignored so only the parent
        # process handles Ctrl+C. concurrent.futures must be imported here:
        # the module top level only does "from concurrent.futures import
        # ThreadPoolExecutor", which does not bind the name 'concurrent',
        # so the qualified references below previously raised NameError.
        import signal
        import concurrent.futures
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        logger.info(f"子进程开始处理数据块，包含 {len(data_chunk)} 个数据项")

        # Process data items concurrently with a thread pool.
        with concurrent.futures.ThreadPoolExecutor(max_workers=process_params.get('threads_per_process', 10)) as executor:
            futures = [
                executor.submit(_process_data_item_wrapper, item, process_params,
                                process_func, results_queue, pause, network_online)
                for item in data_chunk
            ]

            # Wait for all tasks; one bad item must not cancel the rest.
            for future in concurrent.futures.as_completed(futures):
                try:
                    future.result()
                except Exception as e:
                    logger.error(f"数据项处理任务异常: {e}")

        logger.info(f"子进程完成数据块处理，共处理 {len(data_chunk)} 个数据项")
    except Exception as e:
        logger.error(f"子进程处理异常: {e}")
        # Record the failure so the affected chunk can be reprocessed later.
        with open('process_failures.log', 'a') as f:
            f.write(f"{datetime.now()} - 子进程异常: {e}\n")
            f.write(f"  受影响的数据块大小: {len(data_chunk) if data_chunk else 0}\n")


def _process_data_item_wrapper(item, process_params, process_func, results_queue, pause, network_online):
    """Run *process_func* on one item, with pause handling and retries.

    Args:
        item: the data item to process
        process_params: parameter dict (reads 'retry_count')
        process_func: callable(item, process_params) returning a result or None
        results_queue: queue receiving non-None results, may be None
        pause: pause event object
        network_online: network-online event object
    """
    max_retries = process_params['retry_count']
    for attempt in range(max_retries + 1):
        # Respect pause requests and wait out any network outage first.
        wait_continue(pause)
        wait_network_online(network_online, f"网络离线，暂停处理数据项")

        try:
            # Delegate to the caller-supplied processing function and
            # forward any non-None result when a queue was supplied.
            outcome = process_func(item, process_params)
            if outcome is not None and results_queue is not None:
                results_queue.put(outcome)
            break  # success: no further attempts
        except Exception as e:
            if attempt >= max_retries:
                logger.error(f"数据项处理失败，已达到最大重试次数 ({process_params['retry_count']}): {e}")
            else:
                logger.warning(f"数据项处理失败，准备重试 ({attempt+1}/{process_params['retry_count']}): {e}")
                # Brief back-off before the next attempt.
                time.sleep(0.1)


def _scan_udp_port_subprocess(ip, port, ip_id, scan_params, results_queue, pause, network_online):
    """Probe one UDP port on an IP, with rate limiting and retries.

    Puts a result dict on *results_queue* when the port is open; otherwise
    returns silently after exhausting the retries.

    Args:
        ip: target IP address
        port: UDP port number
        ip_id: database ID of the IP
        scan_params: parameter dict ('scan_speed', 'retry_count', 'timeout')
        results_queue: queue receiving open-port result dicts
        pause: pause event object
        network_online: network-online event object
    """
    # Rate limiting: the previous probe's timestamp is stored as an
    # attribute on the function object.
    # NOTE(review): this attribute is shared by all pool threads without a
    # lock, so the effective rate limit is approximate — confirm acceptable.
    previous = getattr(_scan_udp_port_subprocess, 'last_scan_time', None)
    if previous:
        min_gap = 1.0 / scan_params['scan_speed']
        since_last = time.time() - previous
        if since_last < min_gap:
            time.sleep(min_gap - since_last)
    _scan_udp_port_subprocess.last_scan_time = time.time()

    # One initial attempt plus scan_params['retry_count'] retries.
    total_tries = scan_params['retry_count'] + 1
    for attempt in range(total_tries):
        # Honour pause requests and wait out network outages before probing.
        wait_continue(pause)
        wait_network_online(network_online, f"网络离线，暂停探测 {ip}:{port}/udp")

        if check_udp_port(ip, port, scan_params['timeout']):
            # Port is open: report it and stop retrying.
            results_queue.put({
                'ip': ip,
                'port': port,
                'protocol': 'udp',
                'service': get_service_name(port, 'udp'),
                'ip_id': ip_id
            })
            break
        if attempt + 1 < total_tries:
            # Closed/filtered: brief back-off before the next attempt.
            time.sleep(0.1)


def _scan_udp_ports_subprocess(ip, ip_id, scan_params, results_queue, pause, network_online):
    """子进程中扫描UDP端口（使用线程池提高扫描速度）
    
    Args:
        ip: IP地址
        ip_id: IP在数据库中的ID
        scan_params: 扫描参数字典
        results_queue: 结果队列
    """
    # 子进程中需要单独导入模块
    import concurrent.futures
    # 使用线程池并发扫描UDP端口
    with concurrent.futures.ThreadPoolExecutor(max_workers=scan_params.get('threads_per_process', 10)) as executor:
        # 提交所有端口扫描任务
        futures = []
        for port in scan_params['udp_ports']:
            future = executor.submit(_scan_udp_port_subprocess, ip, port, ip_id, scan_params, results_queue, pause, network_online)
            futures.append(future)
        
        # 等待所有任务完成，捕获异常但不阻止其他任务
        for future in concurrent.futures.as_completed(futures):
            try:
                future.result()
            except Exception as e:
                logger.error(f"UDP端口扫描任务异常: {e}")



def _mark_ip_scanned_subprocess(ip_id, scan_start_time, db_connector, ip, total_scanned_ports, scan_params):
    """Mark an IP as scanned in the database (runs in a worker subprocess).

    Builds an UPDATE for the ``ips`` row: always sets update_time; sets
    is_pingable / scan_status / last_scan_time depending on whether the
    liveness (ping) check allowed the scan to run; records the largest
    port count scanned so far. On a lost MySQL connection the update is
    retried once after reconnecting. Failures are appended to
    ``scan_failures.log``.

    Args:
        ip_id: IP row ID in the database
        scan_start_time: time the scan of this IP started
        db_connector: database connector instance
        ip: IP address (used for logging only)
        total_scanned_ports: number of ports scanned in this run
        scan_params: scan parameter dict (reads 'check_alive')
    """
    try:
        current_time = datetime.now()

        set_sql = " update_time = %s"
        update_params = [current_time]

        # Detect the "not actually scanned" case: with liveness checking on,
        # zero scanned ports means the ping failed and no scan was run.
        not_scan = scan_params['check_alive'] and total_scanned_ports == 0
        if not_scan:
            # Ping test showed the IP is unreachable.
            set_sql += ", is_pingable = 0"
        else:
            if scan_params['check_alive']:
                # The scan only ran because the ping succeeded.
                set_sql += ", is_pingable = 1"
            
            # Record scan completion and the scan start time.
            set_sql += ", scan_status = 1, last_scan_time = %s"
            update_params.append(scan_start_time)

            # Look up the previously recorded maximum port count.
            try:
                previous_port_count = db_connector.fetchone("""
                    SELECT port_scan_count 
                    FROM ips 
                    WHERE ip_id = %s
                """, (ip_id,))
                previous_port_count = previous_port_count.get('port_scan_count', 0) if previous_port_count else 0
                
                # Only update when this run scanned more ports than before.
                if total_scanned_ports > previous_port_count:
                    set_sql += ", port_scan_count = %s"
                    update_params.append(total_scanned_ports)
            except Exception as e:
                logger.error(f"查询端口数量失败: {e}")
        
        update_sql = "UPDATE ips SET " + set_sql + " WHERE ip_id = %s"
        update_params.append(ip_id)

        # Execute the update; if the connection dropped, reconnect and retry.
        try:
            db_connector.execute_query(
                update_sql, 
                update_params
            )
        except Exception as query_error:
            # Heuristic check for MySQL "lost connection" (error code 2013).
            if 'Lost connection' in str(query_error) or '2013' in str(query_error):
                logger.warning(f"数据库连接已断开，尝试重新连接: {query_error}")
                # Try to re-establish the connection.
                try:
                    db_connector.disconnect()
                    db_connector.connect()
                    # Re-run the update on the fresh connection.
                    db_connector.execute_query(
                        update_sql, 
                        update_params
                    )
                except Exception as reconnect_error:
                    logger.error(f"重新连接数据库失败: {reconnect_error}")
                    # Give up; the failure is logged for later handling.
                    return
            else:
                # Any other error is propagated to the outer handler.
                raise
        if not not_scan:
            logger.info(f"IP {ip} 扫描完成，已标记为已扫描")
    except Exception as e:
        logger.error(f"标记IP {ip} 为已扫描异常: {e}")
        # Record the failed IP to a log file so it can be handled later.
        with open('scan_failures.log', 'a') as f:
            f.write(f"{datetime.now()} - 标记IP {ip} (ID: {ip_id}) 为已扫描失败: {e}\n")




if __name__ == "__main__":
    """测试IPScanner"""
    # Create an IPScanner instance (test mode: no db_config is passed, so
    # connections are created at run time).
    scanner = IPScanner(show_port_config=False)
    
    # Configure the network-connectivity check (3-second interval).
    print("启用网络检查功能，检查间隔为3秒...")
    scanner.set_network_check(check_ip='8.8.8.8', enable=True, check_interval=3)
    
    # Enable target-IP liveness (ping) detection.
    print("启用目标IP在线检测功能...")
    scanner.set_check_alive(enable=True)
    
    # Example IP addresses to scan.
    test_ips = [
        {'ip': '8.8.8.8', 'ip_id': 1},
        {'ip': '1.1.1.1', 'ip_id': 2},
        {'ip': '192.168.1.100', 'ip_id': 3}  # likely unreachable on this LAN
    ]
    
    # Run the scan.
    scanner.scan_ips(test_ips)
    
    # After testing, the network check can be disabled:
    # scanner.set_network_check(enable=False)
    # Likewise the target-IP liveness detection:
    # scanner.set_check_alive(enable=False)