import binascii

import scapy.packet
import pandas as pd
from scapy.layers.inet import TCP, IP
from scapy.utils import rdpcap
import re
import json

def parse_pcap(pcap_file):
    """Read a pcap capture and collect raw TCP payloads of Kafka traffic.

    Scans every packet in the file and keeps the payload of each TCP
    segment whose source or destination port is 19092 (the non-default
    Kafka broker port used by this capture; the Kafka default is 9092).

    :param pcap_file: path to the .pcap file to read
    :return: list of ``bytes`` payloads, in capture order.
             NOTE: despite the original variable name ``hex_str``, these
             are raw bytes, not hex strings — payloads may be compressed
             or otherwise non-textual.
    """
    payloads = []
    for packet in rdpcap(pcap_file):
        # Only TCP segments can carry Kafka protocol data.
        if not packet.haslayer(TCP):
            continue
        tcp_layer = packet.getlayer(TCP)
        if tcp_layer.sport == 19092 or tcp_layer.dport == 19092:
            # Keep the payload as raw bytes; decoding is deferred to the
            # caller because the content may not be valid text.
            payloads.append(bytes(tcp_layer.payload))
    return payloads


def decode_hex(hex_string):
    """Decode a hex string into text and return it.

    Converts the hex string back to bytes with ``bytes.fromhex`` and
    attempts to decode them as ASCII.

    :param hex_string: hexadecimal representation of the payload,
                       e.g. ``"48656c6c6f"``
    :return: the decoded text, or ``None`` if decoding fails.
    :raises ValueError: if *hex_string* is not valid hexadecimal.
    """
    bytes_obj = bytes.fromhex(hex_string)
    try:
        text = bytes_obj.decode('ASCII')
    except UnicodeDecodeError:
        # NOTE(review): the code decodes as ASCII but this message (and the
        # original comments) say UTF-8 — confirm which encoding is intended.
        print("解码失败，数据可能不是有效的UTF-8编码。")
        return None
    print(f"解码后的文本: {text}")
    return text

def decode_chuli(string):
    """Split a string holding two concatenated JSON objects at '}},'.

    The first object loses its final '}' in the slice, so one is
    re-appended; the second object's leading '},' residue is stripped.

    :param string: text of the form ``{...}},{...}``
    :return: ``[first_json, second_json]`` when the first piece looks
             like a real object (length > 10), otherwise ``[]``.
    """
    # Index just past the first '}' of the '}},' separator
    # (0 when the separator is absent, since find() returns -1).
    split_at = string.find('}},') + 1
    first_part = string[:split_at].strip() + "}"
    second_part = string[split_at:].lstrip('},')
    if len(first_part) > 10:
        return [first_part, second_part]
    return []

def row_data(d=None, out=None):
    """Flatten one decoded span dict into a fixed 14-column row.

    The column order matches the module-level ``columns`` list used by
    the ``__main__`` block: traceId, id, kind, name, timestamp, duration,
    localEndpoint (serviceName, ipv4), remoteEndpoint (ipv4, port), and
    the four http/mvc tags. Missing keys become "".

    Backward compatible: with no arguments it reads the module-level
    ``data_dict`` and appends to the module-level ``rows`` list, exactly
    as the original parameterless version did.

    :param d: span dict to flatten; defaults to the global ``data_dict``.
    :param out: list to append the row to; defaults to the global ``rows``.
    """
    if d is None:
        d = data_dict
    if out is None:
        out = rows
    # Hoist the nested dicts once instead of repeating .get() chains.
    local_ep = d.get("localEndpoint", {})
    remote_ep = d.get("remoteEndpoint", {})
    tags = d.get("tags", {})
    row = [
        d.get("traceId", ""),
        d.get("id", ""),
        d.get("kind", ""),
        d.get("name", ""),
        d.get("timestamp", ""),
        d.get("duration", ""),
        local_ep.get("serviceName", ""),
        local_ep.get("ipv4", ""),
        remote_ep.get("ipv4", ""),
        remote_ep.get("port", ""),
        tags.get("http.method", ""),
        tags.get("http.path", ""),
        tags.get("mvc.controller.class", ""),
        tags.get("mvc.controller.method", "")
    ]
    out.append(row)




if __name__ == '__main__':
    pcap_file = 'C:\\Users\\11027\\Desktop\\kafka数据包\\kafka3.pcap'
    hex_list = parse_pcap(pcap_file)
    json_str_list = []
    json_str_dict = {}
    # Column names, defined by hand so every extracted key has a column.
    columns = [
        "traceId", "id", "kind", "name", "timestamp", "duration",
        "localEndpoint.serviceName", "localEndpoint.ipv4",
        "remoteEndpoint.ipv4", "remoteEndpoint.port",
        "tags.http.method", "tags.http.path", "tags.mvc.controller.class", "tags.mvc.controller.method"
    ]

    # rows is mutated by row_data(), which also reads the global data_dict
    # assigned inside the loop below.
    # df = pd.DataFrame(columns=columns)
    rows = []
    for str_b in hex_list:
        # Regex capturing everything between the first '[' and ']'.
        pattern = r'\[(.*?)\]'
        # str(str_b) is the repr of a bytes object ("b'...'"), so byte
        # escapes such as \xe9 appear literally inside the matched text.
        match = re.search(pattern, str(str_b))
        if match:
            # Extract the candidate JSON object string inside the brackets.
            json_str = match.group(1)
            if not json_str.startswith("{"):
                # Find the first occurrence of '{"traceId":'.
                start_index = json_str.find('{"traceId":')
                # Find the first '}}' after '{"traceId":'.
                # Searching from start_index + len('{"traceId":') guarantees
                # the '}}' belongs to this object, not to an earlier one.
                end_index = json_str.find('}}', start_index + len('{"traceId":'))
                # Take the content between '{"traceId":' and '}}' inclusive.
                json_part = json_str[start_index:end_index + 2]  # +2 keeps the trailing '}}'
                if json_part and not json_part.startswith("~"):
                    # json_str_list.append(json_part)
                    # print(repr(json_part))  # debug: reveals invisible characters
                    clean_json_str = json_part.strip()
                    data_dict = json.loads(clean_json_str)
                    data = data_dict
                    row_data()
            else:
                    list_json6 = decode_chuli(json_str)
                    for i in list_json6:
                        clean_json_str = i.strip()
                        try:
                            data_dict = json.loads(clean_json_str)
                            row_data()
                        except json.decoder.JSONDecodeError:
                            # The bytes repr leaves UTF-8 escapes literal;
                            # replace the escaped sequence for '非容器环境'
                            # ("non-container environment") with the real
                            # characters so json.loads can succeed.
                            new_i = i.replace('\\xe9\\x9d\\x9e\\xe5\\xae\\xb9\\xe5\\x99\\xa8\\xe7\\x8e\\xaf\\xe5\\xa2\\x83',
                                              '非容器环境')
                            if '}},' in new_i:
                                json_decode = decode_chuli(new_i)
                                for j in json_decode:
                                    data_dict = json.loads(j)
                                    row_data()
                            else:
                                data_dict = json.loads(new_i)
                                row_data()

    df = pd.DataFrame(rows, columns=columns)
    # Write the flattened spans to an Excel workbook.
    excel_path = 'C:\\Users\\11027\\Desktop\\kafka_test.xlsx'
    df.to_excel(excel_path, index=False, engine='openpyxl')
    print(f"数据已写入 {excel_path}")
