import json
import os
import subprocess
from collections import Counter, OrderedDict

import PyPDF2
from loguru import logger
from scapy.all import *

def extract_text_from_pdf(input_pdf_file):
    """
    Convert a PDF file to a TXT file.

    :param input_pdf_file: path of the input PDF; the output TXT path is
        derived by replacing the '.pdf' suffix with '.txt'.
    """
    output_text_file = input_pdf_file.replace('.pdf', '.txt')
    try:
        # 'with' guarantees the PDF handle is closed even if extraction
        # raises (the original leaked the handle on error).
        with open(input_pdf_file, 'rb') as pdf_file:
            pdf_reader = PyPDF2.PdfReader(pdf_file)
            # extract_text() may return None for pages without a text layer;
            # ''.join avoids the quadratic str += pattern.
            text = "".join(page.extract_text() or "" for page in pdf_reader.pages)
        # Save the extracted text next to the PDF.
        with open(output_text_file, 'w', encoding='utf-8') as txt_file:
            txt_file.write(text)
        logger.info(f"文本已保存到{output_text_file}")
    except Exception as e:
        # Best-effort, matching the original behavior: log and continue.
        logger.warning(f"发生错误：{e}")



def gen_json(txt_file):
    """
    Parse the extracted TXT report into a list of records and save it as
    'output.json', then print duplicate-CVE statistics.

    Each record is an OrderedDict with keys "Category/CVE ID", "Source IP"
    and "Target IP"; each field's value is taken from the line following
    its header line. A record is appended when its "Target IP" is seen.

    :param txt_file: path of the TXT file produced by extract_text_from_pdf.
    """
    with open(txt_file, 'r', encoding='utf-8') as file:
        lines = file.readlines()

    data = []
    current_entry = OrderedDict()  # keeps field insertion order

    for index, line in enumerate(lines):
        line = line.strip()
        # Each header line is followed by its value on the next line; the
        # index+1 guard prevents an IndexError on a malformed trailing header.
        if line.startswith("Category/CVE ID") and index + 1 < len(lines):
            current_entry["Category/CVE ID"] = lines[index + 1].strip()
        elif line.startswith("Source IP") and index + 1 < len(lines):
            current_entry["Source IP"] = lines[index + 1].strip()
        elif line.startswith("Target IP") and index + 1 < len(lines):
            current_entry["Target IP"] = lines[index + 1].strip()
            data.append(current_entry)  # "Target IP" closes the record
            current_entry = OrderedDict()

    # Save the extracted records as JSON.
    with open('output.json', 'w', encoding='utf-8') as json_file:
        json.dump(data, json_file, indent=4)

    # Count CVE-ID occurrences directly from the in-memory data
    # (the original re-read the JSON file it had just written).
    cve_id_counts = Counter(
        entry["Category/CVE ID"] for entry in data if entry.get("Category/CVE ID")
    )

    # Report any Category/CVE ID that appears more than once.
    for cve_id, count in cve_id_counts.items():
        if count > 1:
            print(f"CVE ID: {cve_id} 重复了 {count} 次")

    unique_count = len(cve_id_counts)
    print(f'数据去重后, 共存在 {unique_count} 个测试项')

def modify_json():
    """
    Regroup 'output.json' into a dict keyed by source IP and normalize names.

    Writes the raw grouping to 'output_2.json' and the renamed result to
    'output_3.json': "Attacks" entries get a per-IP running number, commas
    and dashes are turned into underscores (dash values get a 'cve_' prefix).
    """
    with open("output.json", "r") as fh:
        records = json.load(fh, object_pairs_hook=OrderedDict)

    # Group the Category/CVE IDs by their source IP, preserving order.
    grouped = OrderedDict()
    for record in records:
        sip = record.get("Source IP")
        grouped.setdefault(sip, []).append(record.get("Category/CVE ID"))

    with open("output_2.json", "w") as fh:
        json.dump(grouped, fh, indent=4)

    attacks_seen = {}  # per-IP counter for generic "Attacks" entries

    def rename(sip, name):
        # Generic "Attacks" entries get a per-IP running number.
        if "Attacks" in name:
            attacks_seen[sip] = attacks_seen.get(sip, 0) + 1
            return f"{sip}_Attacks_{attacks_seen[sip]}"
        if "," in name:
            return name.replace(',', '_')
        if "-" in name:
            return 'cve_' + name.replace('-', '_')
        return name

    renamed = OrderedDict(
        (sip, [rename(sip, name) for name in names]) for sip, names in grouped.items()
    )

    with open('output_3.json', 'w') as fh:
        json.dump(renamed, fh, indent=4)



def split_pcap_by_ip(pcap_file):
    """
    Split the capture into one PCAP per source IP by invoking tshark.

    Reads the IP -> names mapping from 'output_3.json' and writes one
    filtered capture per IP into the 'sip_pcaps' directory.

    :param pcap_file: path of the original capture file.
    """
    output_folder = 'sip_pcaps'
    os.makedirs(output_folder, exist_ok=True)
    with open('output_3.json', 'r') as file:
        data = json.load(file)
    logger.info(f'存在源IP数量 : {len(data.keys())}')
    for ip in data.keys():
        output_file = os.path.join(output_folder, f"{ip}.pcap")
        tshark_filter = f"ip.addr == {ip}"
        logger.info(f'[!] 开始过滤源ip: {ip} 的数据')
        # Argument list + shell=False avoids the shell-injection / quoting
        # hazards of the original f-string shell command.
        subprocess.run(
            ["tshark", "-r", pcap_file, "-w", output_file, "-Y", tshark_filter]
        )


def get_name(ip, index):
    """
    Return the normalized CVE/attack name for a source IP at a session index.

    :param ip: source IP key in 'output_3.json'.
    :param index: zero-based session number within that IP's list.
    :return: the name stored at data[ip][index].
    """
    with open("output_3.json", 'r') as fh:
        mapping = json.load(fh)
    return mapping[ip][index]


def split_pcap_by_sessions(pcap_file):
    """
    Split a per-IP PCAP into per-session PCAPs using inter-packet time gaps.

    Packets closer than max_time_diff seconds belong to the same session;
    each session is written to cve_pcaps/<ip>/<name>.pcap, where <name> is
    looked up via get_name(ip, session_index).

    :param pcap_file: file name like '11.1.1.76.pcap' located in 'sip_pcaps'.
    :return: number of sessions written (at least 1).
    """
    pcap_path = os.path.join('sip_pcaps', pcap_file)
    # splitext removes exactly the final extension; the original
    # str.strip('.pcap') stripped any of the characters '.', 'p', 'c', 'a'
    # from both ends and could corrupt non-numeric file names.
    ip = os.path.splitext(pcap_file)[0]
    packets = rdpcap(str(pcap_path))
    max_time_diff = 15  # seconds; a larger gap starts a new session
    current_session = []
    previous_time = None
    session_count = 0
    output_directory = 'cve_pcaps'
    subdirectory_path = os.path.join(output_directory, ip)
    # makedirs creates the parent 'cve_pcaps' directory as needed.
    os.makedirs(subdirectory_path, exist_ok=True)

    for packet in packets:
        if previous_time is None or packet.time - previous_time <= max_time_diff:
            # Same session: first packet, or gap small enough.
            current_session.append(packet)
        else:
            # Gap exceeded: flush the current session to its own file.
            logger.info(f'[!] 当前解析文件 {pcap_file} 第{session_count+1} 个 ID :{get_name(ip,session_count)}')
            output_file = os.path.join(subdirectory_path, f"{get_name(ip, session_count)}.pcap")
            wrpcap(output_file, current_session)
            session_count += 1
            current_session = [packet]
        previous_time = packet.time

    # Flush the trailing session, if any.
    if current_session:
        logger.info(f'[!] 当前解析文件 {pcap_file} 第{session_count+1} 个 ID : {get_name(ip,session_count)}')
        output_file = os.path.join(subdirectory_path, f"{get_name(ip, session_count)}.pcap")
        wrpcap(output_file, current_session)

    return session_count + 1




# Pipeline entry point: PDF report -> TXT -> JSON maps -> per-session PCAPs.
if __name__ == '__main__' :

    input_file = 'report.pdf'
    pcap_file = 'cyberflood.pcap'

    # Upper bound on the intra-session time gap (seconds).
    # NOTE(review): split_pcap_by_sessions defines its own local
    # max_time_diff = 15, so this module-level value is never read — confirm.
    max_time_diff = 15

    extract_text_from_pdf(input_file)
    gen_json(input_file.replace('.pdf','.txt'))
    modify_json()
    # Only needed on the first run: split the raw capture by source IP.
    # split_pcap_by_ip(pcap_file)
    # Split each per-IP capture (presumably sources 11.1.1.76 .. 11.1.1.254
    # from the test bed — adjust the range to match your report).
    for n in range(76,255):
        split_pcap_by_sessions(f'11.1.1.{n}.pcap')


###使用方法：配置input_file为输出的pdf文件、配置pcap_file为原始的抓包文件，配置range运行即可