#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
BPS文档和PCAP文件解析工具
用于解析BPS打流报文及其输出文档，将网络流量按会话分类保存
"""

import re
import csv
import time
import os
import argparse
import traceback
import threading
from queue import Queue
from pathlib import Path
from typing import Dict, List, Set, Tuple, Optional, Any
from dataclasses import dataclass
from concurrent.futures import ThreadPoolExecutor
import logging

from scapy.all import TCP, UDP, Ether, IP, ICMP, PcapReader, wrpcap

# Configure module-wide logging: INFO level, timestamped "time - LEVEL - message" lines.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

@dataclass
class Config:
    """Runtime configuration for the PCAP splitting tool.

    NOTE: the original version declared these as un-annotated class
    attributes, so ``@dataclass`` generated no fields at all.  Annotating
    them turns them into real dataclass fields with the same defaults,
    which is backward compatible: ``Config()`` still works, attribute
    values are unchanged, and instance attributes can still be assigned
    afterwards (as ``main()`` does).
    """
    ETHER_TYPE_IPV4: int = 2048              # EtherType value for IPv4 frames
    PROTO_TCP: int = 6                       # IP protocol number for TCP
    PROTO_UDP: int = 17                      # IP protocol number for UDP
    QUEUE_SIZE: int = 10000                  # max packets buffered between reader and workers
    QUEUE_REFILL_THRESHOLD: float = 0.7      # NOTE(review): not read anywhere in this file
    THREAD_COUNT: int = 4                    # number of packet-processing worker threads
    PROGRESS_INTERVAL: int = 1000            # log progress every N packets/rows
    OUTPUT_DIR: str = 'shellPcap'            # directory for per-session pcap output
    OTHER_PCAP_FILE: str = 'others_pcap.pcap'  # catch-all file for unmatched packets

class PcapParser:
    """PCAP文件解析器"""
    
    def __init__(self, config: Config):
        self.config = config
        self.session_file_map: Dict[str, str] = {}
        self.packet_queue = Queue(maxsize=config.QUEUE_SIZE)
        self.write_locks: Dict[str, threading.Lock] = {}
        self.main_lock = threading.Lock()
        
        # 预编译正则表达式
        self.patterns = {
            'ipv4': re.compile(r'[\d]{1,3}\.[\d]{1,3}\.[\d]{1,3}\.[\d]{1,3}:\d+'),
            'session': re.compile(r'([\d]{1,3}\.[\d]{1,3}\.[\d]{1,3}\.[\d]{1,3}:\d+)->([\d]{1,3}\.[\d]{1,3}\.[\d]{1,3}\.[\d]{1,3}:\d+)'),
            'cve': re.compile(r'(cve \d+-\d+)', re.I),
            'http_name': re.compile(r'(.*?)[\s]?[(].*?http', re.I)
        }
        
        # 创建输出目录
        self.output_dir = Path(config.OUTPUT_DIR)
        self.output_dir.mkdir(exist_ok=True)
        self.other_file = self.output_dir / config.OTHER_PCAP_FILE
        
        # 计数器
        self.cve_counter = 1
        self.non_cve_counter = 1
    
    def _get_file_lock(self, filename: str) -> threading.Lock:
        """获取文件锁，如果不存在则创建"""
        if filename not in self.write_locks:
            with self.main_lock:
                if filename not in self.write_locks:
                    self.write_locks[filename] = threading.Lock()
        return self.write_locks[filename]
    
    def _make_session_bidirectional(self, sessions: List[Tuple[str, str]]) -> Set[str]:
        """创建双向会话集合"""
        bidirectional_sessions = set()
        for src, dst in sessions:
            bidirectional_sessions.add(f"{src}->{dst}")
            bidirectional_sessions.add(f"{dst}->{src}")
        return bidirectional_sessions
    
    def _parse_ip_ports_session(self, session_str: str) -> Tuple[str, Set[str]]:
        """解析IP端口会话信息"""
        if not self.patterns['ipv4'].search(session_str):
            logger.warning(f"可能是IPv6地址，暂不支持: {session_str}")
            raise ValueError(f"不支持的IP格式: {session_str}")
        
        sessions = self.patterns['session'].findall(session_str)
        if not sessions:
            raise ValueError(f"未解析到有效会话: {session_str}")
        
        primary_session = f"{sessions[0][0]}->{sessions[0][1]}"
        all_sessions = self._make_session_bidirectional(sessions)
        
        return primary_session, all_sessions
    
    def _sanitize_filename(self, filename: str) -> str:
        """清理文件名中的非法字符"""
        illegal_chars = ['\\', '/', '*', ':', '?', '"', '<', '>', "'"]
        for char in illegal_chars:
            filename = filename.replace(char, '_')
        return filename
    
    def _get_session_string(self, packet, protocol_class) -> str:
        """获取数据包的会话字符串"""
        if protocol_class in (TCP, UDP):
            return f"{packet[IP].src}:{packet[protocol_class].sport}->{packet[IP].dst}:{packet[protocol_class].dport}"
        else:
            return f"{packet[IP].src}:0->{packet[IP].dst}:0"
    
    def load_csv_data(self, csv_file: str) -> List[List[str]]:
        """加载CSV数据"""
        logger.info("正在读取CSV文件...")
        data = []
        
        try:
            with open(csv_file, 'r', encoding='utf-8') as file:
                reader = csv.reader(file)
                for row in reader:
                    if (len(row) > 4 and row[4] and 
                        'Allowed' in row[2] and '.' in row[0]):
                        data.append(row)
        except Exception as e:
            logger.error(f"读取CSV文件失败: {e}")
            raise
        
        logger.info(f"成功读取 {len(data)} 条记录")
        return data
    
    def build_session_mapping(self, csv_data: List[List[str]]) -> None:
        """构建会话到文件的映射"""
        logger.info("正在构建会话映射...")
        
        for idx, row in enumerate(csv_data, 1):
            try:
                if idx % 1000 == 0:
                    logger.info(f"处理进度: {idx}/{len(csv_data)}")
                
                # 检查是否包含CVE信息
                cve_match = self.patterns['cve'].search(row[3])
                primary_session, all_sessions = self._parse_ip_ports_session(row[5])
                
                # 生成文件名
                session_key = primary_session.replace(':', '_').replace('->', '_')
                
                if cve_match:
                    cve_name = cve_match.group(1)
                    filename = self._sanitize_filename(
                        f"{re.sub(r'[\s]+', '-', cve_name)}_{session_key}_{self.cve_counter}.pcap"
                    )
                    self.cve_counter += 1
                else:
                    name_match = self.patterns['http_name'].search(row[1])
                    if name_match:
                        base_name = name_match.group(1)
                        filename = self._sanitize_filename(
                            f"{base_name}_{session_key}_{self.non_cve_counter}.pcap"
                        )
                        self.non_cve_counter += 1
                    else:
                        logger.warning(f"无法提取名称信息: {row[1]}")
                        continue
                
                # 添加会话映射
                file_path = str(self.output_dir / filename)
                for session in all_sessions:
                    if session == "0.0.0.0:0->0.0.0.0:0":
                        continue
                    
                    if session in self.session_file_map:
                        logger.warning(f"会话冲突: {session}")
                        continue
                    
                    self.session_file_map[session] = file_path
                    
            except Exception as e:
                logger.error(f"处理第 {idx} 行数据时出错: {e}")
                logger.error(f"数据内容: {row}")
                continue
        
        logger.info(f"成功构建 {len(self.session_file_map)} 个会话映射")
    
    def _write_packet_to_file(self, filename: str, packet) -> None:
        """线程安全地写入数据包到文件"""
        file_lock = self._get_file_lock(filename)
        with file_lock:
            wrpcap(filename, packet, append=True)
    
    def _process_packet(self, packet_data: Tuple[int, Any]) -> None:
        """处理单个数据包"""
        idx, packet = packet_data
        
        # 检查是否为退出信号
        if packet == -1:
            return False
        
        try:
            # 基本格式检查
            if Ether not in packet:
                self._write_packet_to_file(str(self.other_file), packet)
                return True
            
            if IP not in packet:
                self._write_packet_to_file(str(self.other_file), packet)
                return True
            
            # 根据协议类型处理
            if (packet[Ether].type == self.config.ETHER_TYPE_IPV4 and 
                packet[IP].proto == self.config.PROTO_TCP):
                session_str = self._get_session_string(packet, TCP)
            elif (packet[Ether].type == self.config.ETHER_TYPE_IPV4 and 
                  packet[IP].proto == self.config.PROTO_UDP):
                session_str = self._get_session_string(packet, UDP)
            else:
                session_str = self._get_session_string(packet, ICMP)
            
            # 查找对应的输出文件
            target_file = self.session_file_map.get(session_str)
            if not target_file:
                self._write_packet_to_file(str(self.other_file), packet)
                return True
            
            # 写入到目标文件
            self._write_packet_to_file(target_file, packet)
            
            if idx % self.config.PROGRESS_INTERVAL == 0:
                logger.info(f"已处理数据包: {idx}")
                
        except Exception as e:
            logger.error(f"处理数据包 {idx} 时出错: {e}")
            logger.error(traceback.format_exc())
        
        return True
    
    def _packet_worker(self) -> None:
        """数据包处理工作线程"""
        while True:
            try:
                packet_data = self.packet_queue.get(timeout=1)
                if not self._process_packet(packet_data):
                    # 收到退出信号，重新放入队列供其他线程处理
                    self.packet_queue.put(packet_data)
                    break
                self.packet_queue.task_done()
            except:
                break
    
    def parse_pcap_file(self, pcap_file: str) -> None:
        """解析PCAP文件"""
        logger.info("开始解析PCAP文件...")
        
        # 启动工作线程
        threads = []
        for i in range(self.config.THREAD_COUNT):
            thread = threading.Thread(target=self._packet_worker, name=f"Worker-{i}")
            thread.daemon = True
            thread.start()
            threads.append(thread)
        
        start_time = time.time()
        packet_count = 0
        
        try:
            with PcapReader(pcap_file) as reader:
                for packet in reader:
                    # 队列满时等待
                    while self.packet_queue.qsize() >= self.config.QUEUE_SIZE:
                        time.sleep(0.1)
                    
                    packet_count += 1
                    self.packet_queue.put((packet_count, packet))
                    
                    if packet_count % self.config.PROGRESS_INTERVAL == 0:
                        logger.info(f"已读取数据包: {packet_count}, 队列大小: {self.packet_queue.qsize()}")
            
            # 发送退出信号
            for _ in range(self.config.THREAD_COUNT):
                self.packet_queue.put((0, -1))
            
            # 等待所有任务完成
            for thread in threads:
                thread.join()
            
            elapsed_time = time.time() - start_time
            logger.info(f"解析完成! 总共处理 {packet_count} 个数据包，耗时 {elapsed_time:.2f} 秒")
            
        except Exception as e:
            logger.error(f"解析PCAP文件时出错: {e}")
            raise

def main() -> int:
    """Command-line entry point.

    Returns:
        Process exit status: 0 on success, 1 on any error.  The original
        returned ``None`` (exit status 0) when an input file was missing,
        so shell callers could not detect that failure.
    """
    parser = argparse.ArgumentParser(description='BPS文档和PCAP文件解析工具')
    parser.add_argument('-p', '--pcap', required=True, help='输入PCAP文件路径')
    parser.add_argument('-c', '--csv', required=True, help='输入CSV文件路径')
    parser.add_argument('--threads', type=int, default=4, help='工作线程数量')
    parser.add_argument('--output-dir', default='shellPcap', help='输出目录')

    args = parser.parse_args()

    # Validate inputs up front; exit non-zero so failures are detectable.
    if not os.path.exists(args.pcap):
        logger.error(f"PCAP文件不存在: {args.pcap}")
        return 1

    if not os.path.exists(args.csv):
        logger.error(f"CSV文件不存在: {args.csv}")
        return 1

    # Build the configuration, overriding defaults from the command line.
    config = Config()
    config.THREAD_COUNT = args.threads
    config.OUTPUT_DIR = args.output_dir

    try:
        # Create the parser and run the three phases in order.
        parser_instance = PcapParser(config)
        csv_data = parser_instance.load_csv_data(args.csv)
        parser_instance.build_session_mapping(csv_data)
        parser_instance.parse_pcap_file(args.pcap)

        logger.info("所有任务完成!")

    except Exception as e:
        logger.error(f"程序执行失败: {e}")
        logger.error(traceback.format_exc())
        return 1

    return 0

if __name__ == "__main__":
    # raise SystemExit(...) rather than the site-module exit() helper,
    # which is intended for interactive use and absent under `python -S`.
    raise SystemExit(main())