import dpkt
import socket
import time
from datetime import datetime
import re
from typing import Optional

import re,struct, random, copy
from typing import List, Tuple, Optional



def center_text(text, width):
    """
    Center *text* within *width* columns.

    Rich markup tags such as ``[bold]``/``[/bold]`` are stripped before
    measuring, so padding is computed from the *visible* width while the
    returned string keeps the tags intact.

    Args:
        text: text to center, possibly containing Rich markup tags.
        width: target display width in characters.

    Returns:
        The text padded with spaces on both sides, or the original text
        unchanged when its visible width already meets or exceeds *width*.
    """
    # Strip Rich markup tags to get the true display width.
    # (Uses the module-level `re` import; no local import needed.)
    clean_text = re.sub(r'\[[^\]]*\]', '', text)
    actual_width = len(clean_text)

    if actual_width >= width:
        # Already wide enough -- return unchanged.
        return text

    # Split the remaining space, biasing the extra column to the right.
    padding_needed = width - actual_width
    left_padding = padding_needed // 2
    right_padding = padding_needed - left_padding
    return ' ' * left_padding + text + ' ' * right_padding

def format_count(count):
    """
    Render a large number in a compact human-readable form.

    Examples: 1234 -> '1.2K', 1234567 -> '1.2M', 1234567890 -> '1.2B'.
    Values below 1000 are returned as plain decimal strings.
    """
    # Check the largest unit first so each value maps to exactly one suffix.
    for limit, suffix in ((1_000_000_000, 'B'), (1_000_000, 'M'), (1_000, 'K')):
        if count >= limit:
            return f"{count / limit:.1f}{suffix}"
    return str(count)





class PcapGenerator:
    """Synthesize a PCAP file from pre-captured request/response byte blobs.

    Builds Ethernet/IP/TCP frames with dpkt and writes them with
    artificial timestamps so the exchange can be inspected in Wireshark.
    NOTE(review): this produces a *plausible-looking* stream, not a fully
    valid TCP conversation (see the seq/ack notes below) — confirm that is
    acceptable for the intended consumer.
    """

    def __init__(self, src_ip='192.168.1.100', dst_ip='192.168.1.101',
                 src_port=12345, dst_port=4433):
        """
        Initialize the PCAP generator.

        Args:
            src_ip: source IP address (client)
            dst_ip: destination IP address (server)
            src_port: source port
            dst_port: destination port (default TLS 4433)
        """
        self.src_ip = src_ip
        self.dst_ip = dst_ip
        self.src_port = src_port
        self.dst_port = dst_port
        # NOTE(review): a single seq counter (and fixed ack=0) is shared by
        # BOTH directions — real TCP keeps one per endpoint. TODO confirm
        # downstream tooling tolerates this.
        self.seq = 0
        self.ack = 0
        # Capture start time; later incremented by fractional seconds.
        self.timestamp = int(time.time())

    def _create_tcp_packet(self, data, is_client, flags=dpkt.tcp.TH_PUSH|dpkt.tcp.TH_ACK):
        """Build one Ethernet/IP/TCP frame carrying *data*.

        Args:
            data: TCP payload bytes.
            is_client: True -> client-to-server direction; False swaps the
                configured addresses and ports for the reverse direction.
            flags: TCP flags for the segment (default PSH|ACK).

        Returns:
            A dpkt Ethernet object ready to be written to a pcap file.
        """
        eth = dpkt.ethernet.Ethernet()
        if is_client:
            src_ip = self.src_ip
            dst_ip = self.dst_ip
            src_port = self.src_port
            dst_port = self.dst_port
        else:
            src_ip = self.dst_ip
            dst_ip = self.src_ip
            src_port = self.dst_port
            dst_port = self.src_port

        ip = dpkt.ip.IP(src=socket.inet_aton(src_ip),
                        dst=socket.inet_aton(dst_ip),
                        p=dpkt.ip.IP_PROTO_TCP)
        tcp = dpkt.tcp.TCP(sport=src_port,
                          dport=dst_port,
                          seq=self.seq,
                          ack=self.ack,
                          flags=flags)
        
        tcp.data = data
        ip.data = tcp
        # NOTE(review): only the payload length is added, not the 20-byte TCP
        # header; dpkt may recompute ip.len on serialization — verify the
        # resulting length field is what Wireshark expects.
        ip.len += len(tcp.data)
        eth.data = ip
        
        # Advance the sequence number: SYN consumes one sequence number,
        # otherwise data consumes len(data).
        if flags & dpkt.tcp.TH_SYN:
            self.seq += 1
        else:
            self.seq += len(data) if data else 0
        
        return eth

    def save_to_pcap(self, messages, responses, filename='debug.pcap'):
        """
        Write the exchange to a PCAP file, treating responses[0] as an
        initial server packet (e.g. a greeting) sent before any request.

        Args:
            messages: list of client payloads (bytes)
            responses: list of server payloads (bytes); must be non-empty —
                responses[0] is written first, then responses[1:] are paired
                with *messages*.
            filename: output PCAP file name
        """
        # Create the PCAP writer.
        with open(filename, 'wb') as f:
            pcap_writer = dpkt.pcap.Writer(f)
            
            # Initial server packet, before the request/response pairs.
            pre_resp = responses[0]
            pcap_writer.writepkt(self._create_tcp_packet(pre_resp, False), ts=self.timestamp)

            # Application data: one client packet then one server packet per pair.
            for msg, resp in zip(messages, responses[1:]):


                
                # Client sends (0.2 s after the previous packet).
                self.timestamp += 0.2
                pcap_writer.writepkt(self._create_tcp_packet(msg, True) , ts=self.timestamp)
                
                # Server responds (0.1 s later).
                self.timestamp += 0.1
                pcap_writer.writepkt(self._create_tcp_packet(resp, False), ts=self.timestamp)
            
    def save_to_pcap2(self, messages, responses, filename='debug.pcap'):
        """
        Write the exchange to a PCAP file, pairing messages[i] with
        responses[i] directly (no leading server packet) and skipping
        empty responses.

        Args:
            messages: list of client payloads (bytes)
            responses: list of server payloads (bytes)
            filename: output PCAP file name
        """
        # Create the PCAP writer.
        with open(filename, 'wb') as f:
            pcap_writer = dpkt.pcap.Writer(f)
            


            # Application data: one client packet, then the server packet if any.
            for msg, resp in zip(messages, responses):


                
                # Client sends (0.2 s after the previous packet).
                self.timestamp += 0.2
                pcap_writer.writepkt(self._create_tcp_packet(msg, True) , ts=self.timestamp)
                
                # Server responds only when there is payload to show.
                if len(resp) > 0:
                    self.timestamp += 0.1
                    pcap_writer.writepkt(self._create_tcp_packet(resp, False), ts=self.timestamp)
            
def get_cur_time_us():
    """Return a high-resolution monotonic timestamp in microseconds.

    Based on ``time.perf_counter`` — suitable for interval measurement
    and performance analysis, not wall-clock time.
    """
    seconds = time.perf_counter()
    return int(seconds * 1_000_000)

def extract_requests_tls( buf: bytes) -> List[bytearray]:
    """
    Split a byte buffer into individual TLS records.

    Each TLS record starts with a 5-byte header: content type (1 byte),
    protocol version (2 bytes), and payload length (2 bytes, big-endian).

    Args:
        buf: input buffer containing zero or more TLS records.

    Returns:
        List of records as bytearrays. A trailing truncated record is
        returned as-is; if no header can be parsed at all, the whole
        buffer becomes a single message. Empty input yields [].
    """
    messages: List[bytearray] = []
    pos = 0
    buf_size = len(buf)

    while pos + 5 <= buf_size:  # need at least a 5-byte TLS record header
        # Only the length field is needed to find the record boundary.
        length = struct.unpack('!H', buf[pos+3:pos+5])[0]

        record_end = pos + 5 + length
        if record_end > buf_size:
            # Truncated record: keep the remainder as one message.
            # Fix: wrap in bytearray so every element matches the declared
            # List[bytearray] return type (callers such as Mutator.mutate
            # reject plain bytes).
            messages.append(bytearray(buf[pos:]))
            break

        # Extract the complete TLS record (header + payload).
        messages.append(bytearray(buf[pos:record_end]))
        pos = record_end

    # No parseable record at all: return the whole buffer as one message.
    if not messages and buf:
        messages.append(bytearray(buf))

    return messages

class Extra:
    """One fuzzing-dictionary entry: its bytes, cached length, and hit count."""

    def __init__(self, data: bytes):
        # Cache the length up front; `hit` counts how often the entry fired.
        self.data, self.len, self.hit = data, len(data), 0

def parse_extra_line(line: str, dict_level: int) -> Optional[bytes]:
    """Parse one dictionary line of the form ``name[@level]="value"``.

    Supported escapes inside the quoted value: ``\\\\``, ``\\"`` and
    ``\\xHH``. Only printable ASCII (32..127) is accepted unescaped.

    Returns the decoded value as bytes, or None for blank/comment lines
    and for entries whose @level exceeds *dict_level*.
    Raises ValueError on malformed input.
    """
    line = line.strip()
    if not line or line.startswith('#'):
        return None

    # name starts with a letter/underscore; optional @level; quoted value
    # that may contain escaped characters.
    entry_re = re.compile(
        r'^([a-zA-Z_][a-zA-Z0-9_\-]*)(@\d+)?\s*=\s*"((?:[^"\\]|\\.)*)"$'
    )
    m = entry_re.match(line)
    if m is None:
        raise ValueError(f"Malformed line: {line}")

    _label, level_part, raw_value = m.group(1), m.group(2), m.group(3)

    # Entries gated behind a higher dictionary level are silently skipped.
    if level_part and int(level_part[1:]) > dict_level:
        return None

    # Decode escapes into raw bytes.
    decoded = bytearray()
    idx = 0
    n = len(raw_value)
    while idx < n:
        ch = raw_value[idx]
        if ch != '\\':
            code = ord(ch)
            if code < 32 or code >= 128:
                raise ValueError(f"Non-printable character in line: {line}")
            decoded.append(code)
            idx += 1
            continue
        # Escape sequence: look at the character after the backslash.
        idx += 1
        if idx >= n:
            raise ValueError(f"Invalid escape at end of line: {line}")
        esc = raw_value[idx]
        if esc == '\\' or esc == '"':
            decoded.append(ord(esc))
        elif esc == 'x' and idx + 2 < n:
            digits = raw_value[idx + 1:idx + 3]
            if all(d in '0123456789abcdefABCDEF' for d in digits):
                decoded.append(int(digits, 16))
                idx += 2
            else:
                raise ValueError(f"Invalid hex escape in line: {line}")
        else:
            raise ValueError(f"Unsupported escape: \\{esc} in line: {line}")
        idx += 1

    return bytes(decoded)

def load_extras_file(fname: str, dict_level: int = 0, max_dict_file: int = 1024) -> List[Extra]:
    """
    Load an extras (fuzzing dictionary) file.

    :param fname: path to the dictionary file
    :param dict_level: dictionary level used to filter @level entries
    :param max_dict_file: maximum length allowed for a single keyword
    :return: list of parsed Extra entries
    :raises ValueError: if any line fails to parse or a keyword exceeds
        *max_dict_file* bytes; the message carries the 1-based line number.
    """
    extras: List[Extra] = []

    with open(fname, "r", encoding="utf-8") as f:
        for line_num, line in enumerate(f, start=1):
            # Skip blanks and comment lines before parsing.
            stripped = line.rstrip('\n').strip()
            if not stripped or stripped.startswith('#'):
                continue

            try:
                parsed = parse_extra_line(stripped, dict_level)
                if parsed is None:
                    continue  # filtered out by @level
                if len(parsed) > max_dict_file:
                    raise ValueError(f"Keyword too big in line {line_num}, limit is {max_dict_file}")
                extras.append(Extra(parsed))
            except Exception as e:
                # Fix: chain with `from e` so the root cause stays visible
                # in the traceback instead of being swallowed.
                raise ValueError(f"Error parsing line {line_num}: {e}") from e

    return extras

class Mutator:
    """
    AFL-style fuzzing mutator.

    Byte-level operators (dispatch indices 0-16) mutate one bytearray in
    place; region-level operators (17-20) replace/insert/duplicate whole
    messages inside a message list.
    """

    def __init__(self, extras = None, region_level_mutation = True):
        """
        Args:
            extras: optional list of user-supplied Extra dictionary entries.
            region_level_mutation: when True, mutate() may also pick the
                message(region)-level operators.
        """
        self.region_level_mutation = region_level_mutation

        self._rng = random.Random()

        self.extras = extras or []      # user-supplied dictionary entries
        self.a_extras = []              # automatically detected entries

        # AFL's "interesting" constants, by operand width.
        self.INTERESTING_8 = [-128, -1, 0, 1, 16, 32, 64, 100, 127]
        self.INTERESTING_16 = [-32768, -129, 128, 255, 256, 512, 1000, 1024, 4096, 32767]
        self.INTERESTING_32 = [
            -2147483648, -100663046, -32769, 32768,
            65535, 65536, 100663045, 2147483647
        ]
        self.ARITH_MAX = 35             # max delta for arithmetic mutations

    def mutate(self, messages: List[bytearray], msg_idx: int) -> None:
        """Apply one randomly chosen mutation to messages[msg_idx].

        Region-level operators (choice > 16) receive the whole list so
        they can insert/replace messages.

        Raises:
            TypeError: if *messages* is not a list of bytearrays.
        """
        mutation_funcs = {
            0: self.flip_single_bit,
            1: self.interesting_8,
            2: self.interesting_16,
            3: self.interesting_32,
            4: self.subtract_from_byte,
            5: self.add_from_byte,
            6: self.subtract_from_word,
            7: self.add_from_word,
            8: self.subtract_from_dword,
            9: self.add_from_dword,
            10: self.random_xor_byte,
            11: self.delete_bytes,
            12: self.delete_bytes,
            13: self.clone_or_insert_block,
            14: self.overwrite_bytes,
            15: self.overwrite_with_extra,
            16: self.insert_with_extra,
            17: self.overwrite_with_region,
            18: self.insert_with_region,
            19: self.insert_with_region2,
            20: self.duplicate_region
        }
        if not (isinstance(messages, list) and all(isinstance(m, bytearray) for m in messages)):
            print(f"Invalid messages: {messages}")
            raise TypeError("messages must be a List[bytearray]")

        msg = messages[msg_idx]
        # Pick an operator following AFL's probability distribution; region
        # operators only enter the range when region_level_mutation is on.
        # BUG FIX: a leftover debug override (`choice = 0`) forced every call
        # to flip_single_bit, disabling the whole dispatch table. Removed.
        choice = self._rng.randint(0, 15 + 2 + (4 if self.region_level_mutation else 0))
        if choice in mutation_funcs:
            if choice <= 16:
                mutation_funcs[choice](msg)                 # byte-level
            else:
                mutation_funcs[choice](messages, msg_idx)   # region-level
        else:
            # Out-of-table choice: fall back to a single bit flip.
            self.flip_single_bit(msg)


    def mutate_msg(self, msg: bytearray) -> None:
        """Apply one randomly chosen byte-level mutation to *msg* in place."""
        mutation_funcs = {
            0: self.flip_single_bit,
            1: self.interesting_8,
            2: self.interesting_16,
            3: self.interesting_32,
            4: self.subtract_from_byte,
            5: self.add_from_byte,
            6: self.subtract_from_word,
            7: self.add_from_word,
            8: self.subtract_from_dword,
            9: self.add_from_dword,
            10: self.random_xor_byte,
            11: self.delete_bytes,
            12: self.delete_bytes,
            13: self.clone_or_insert_block,
            14: self.overwrite_bytes,
            15: self.overwrite_with_extra,
            16: self.insert_with_extra,
        }

        # Uniform choice over the byte-level operators only.
        choice = self._rng.randint(0, 16)

        if choice in mutation_funcs:
            mutation_funcs[choice](msg)


    def _choose_position(self, data: bytearray, width: int) -> int:
        """Pick a random start offset such that *width* bytes fit in *data*.

        Raises:
            ValueError: if *data* is shorter than *width* bytes.
        """
        if len(data) < width:
            raise ValueError(f"数据长度不足 {width} 字节")
        return self._rng.randint(0, len(data) - width)


    def keep(self, msg: bytearray) -> None:
        """No-op mutation: leave the message unchanged."""
        return


    def flip_single_bit(self, msg: bytearray) -> None:
        """Flip one random bit in *msg* (MSB-first within each byte)."""
        if not msg:
            return  # fix: empty input would make randint(0, -1) raise

        bit_pos = self._rng.randint(0, len(msg) * 8 - 1)
        byte_pos = bit_pos // 8
        bit_in_byte = bit_pos % 8

        msg[byte_pos] ^= (1 << (7 - bit_in_byte))


    def interesting_8(self, msg: bytearray) -> None:
        """Replace one random byte with an AFL 8-bit interesting value."""
        if len(msg) < 1:
            return
        pos = self._rng.randint(0, len(msg) - 1)
        msg[pos] = self._rng.choice(self.INTERESTING_8) & 0xFF  # clamp to 8 bits

    def interesting_16(self, data: bytearray) -> None:
        """Overwrite 2 bytes with a 16-bit interesting value (random endianness)."""
        if len(data) < 2:
            return
        pos = self._rng.randint(0, len(data) - 2)
        value = self._rng.choice(self.INTERESTING_16)
        # Randomly choose big- or little-endian encoding.
        endian = 'big' if self._rng.choice([True, False]) else 'little'
        data[pos:pos+2] = value.to_bytes(2, endian, signed=True)


    def interesting_32(self, data: bytearray) -> None:
        """Overwrite 4 bytes with a 32-bit interesting value (random endianness)."""
        if len(data) < 4:
            return
        pos = self._rng.randint(0, len(data) - 4)
        value = self._rng.choice(self.INTERESTING_32)

        endian = 'big' if self._rng.choice([True, False]) else 'little'
        data[pos:pos+4] = value.to_bytes(4, endian, signed=True)


    def subtract_from_byte(self, data: bytearray) -> None:
        """Subtract a random delta in [1, ARITH_MAX] from one byte (mod 256)."""
        if not data:
            return

        pos = self._rng.randint(0, len(data) - 1)
        value = 1 + self._rng.randint(0, self.ARITH_MAX - 1)
        data[pos] = (data[pos] - value) % 256  # wrap to 0-255


    def add_from_byte(self, data: bytearray) -> None:
        """Add a random delta in [1, ARITH_MAX] to one byte (mod 256)."""
        if not data:
            return

        pos = self._rng.randint(0, len(data) - 1)
        value = 1 + self._rng.randint(0, self.ARITH_MAX - 1)
        data[pos] = (data[pos] + value) % 256  # wrap to 0-255


    def subtract_from_word(self, data: bytearray) -> None:
        """Subtract a random delta in [1, ARITH_MAX] from a 16-bit word.

        Endianness for the read-modify-write is chosen at random.
        """
        if len(data) < 2:
            return

        pos = self._rng.randint(0, len(data) - 2)
        delta = 1 + self._rng.randint(0, self.ARITH_MAX - 1)

        endian = 'big' if self._rng.choice([True, False]) else 'little'
        value = int.from_bytes(data[pos:pos+2], endian, signed=False)
        value = (value - delta) & 0xFFFF
        data[pos:pos+2] = value.to_bytes(2, endian)


    def add_from_word(self, data: bytearray) -> None:
        """Add a random delta in [1, ARITH_MAX] to a 16-bit word.

        Endianness for the read-modify-write is chosen at random.
        """
        if len(data) < 2:
            return

        pos = self._rng.randint(0, len(data) - 2)
        delta = 1 + self._rng.randint(0, self.ARITH_MAX - 1)

        endian = 'big' if self._rng.choice([True, False]) else 'little'
        value = int.from_bytes(data[pos:pos+2], endian, signed=False)
        value = (value + delta) & 0xFFFF
        data[pos:pos+2] = value.to_bytes(2, endian)

    def subtract_from_dword(self, data: bytearray) -> None:
        """Subtract a random delta in [1, ARITH_MAX] from a 32-bit dword."""
        if len(data) < 4:
            return

        pos = self._rng.randint(0, len(data) - 4)
        delta = 1 + self._rng.randint(0, self.ARITH_MAX - 1)
        endian = 'little' if self._rng.choice([True, False]) else 'big'

        value = int.from_bytes(data[pos:pos+4], endian, signed=False)
        value = (value - delta) & 0xFFFFFFFF  # wrap at 32 bits
        data[pos:pos+4] = value.to_bytes(4, endian)


    def add_from_dword(self, data: bytearray) -> None:
        """Add a random delta in [1, ARITH_MAX] to a 32-bit dword."""
        if len(data) < 4:
            return

        pos = self._rng.randint(0, len(data) - 4)
        delta = 1 + self._rng.randint(0, self.ARITH_MAX - 1)
        endian = 'little' if self._rng.choice([True, False]) else 'big'

        value = int.from_bytes(data[pos:pos+4], endian, signed=False)
        value = (value + delta) & 0xFFFFFFFF  # wrap at 32 bits
        data[pos:pos+4] = value.to_bytes(4, endian)

    def random_xor_byte(self, data: bytearray) -> None:
        """XOR one random byte with a value in 1-255 (never a no-op)."""
        if not data:
            return

        pos = self._rng.randint(0, len(data) - 1)
        xor_value = 1 + self._rng.randint(0, 254)  # 1-255: XOR 0 would change nothing
        data[pos] ^= xor_value

    def delete_bytes(self, data: bytearray) -> None:
        """Delete a random block of bytes (AFL-biased toward short blocks).

        Deletion is listed twice in the dispatch table so it fires more
        often than insertion, keeping message sizes under control.
        """
        if len(data) < 2:  # need at least 2 bytes so something remains
            return

        # AFL caps deletions at 64 bytes and always leaves >= 1 byte.
        max_len = min(len(data) - 1, 64)
        del_len = self._choose_block_len(max_len)

        del_from = self._rng.randint(0, len(data) - del_len)

        # Slice assignment removes the block in place.
        data[del_from:del_from + del_len] = b''

    def _choose_block_len(self, max_len: int) -> int:
        """Pick a block length following AFL's distribution.

        75% of the time a short block (1-8 bytes), otherwise up to 64
        bytes; when max_len < 8 only the short mode is available.

        Raises:
            ValueError: if max_len < 1.
        """
        if max_len < 1:
            raise ValueError("max_len must be at least 1")

        # Hard cap at 64 bytes, AFL style.
        max_len = min(max_len, 64)

        r = self._rng.random()

        if max_len < 8:
            return self._rng.randint(1, max_len)

        if r < 0.75:
            return self._rng.randint(1, min(8, max_len))
        else:
            return self._rng.randint(8, max_len)

    def clone_or_insert_block(self, data: bytearray, max_size: int = 1 * 1024 * 1024) -> None:
        """Clone an existing block (75%) or insert a fresh one (25%).

        Inserted blocks are either random bytes or a single repeated byte;
        total size never exceeds *max_size* (default 1 MB).
        """
        if len(data) >= max_size:
            return

        # Empty input has nothing to clone, so force the insert path
        # (fix: cloning from empty data raised ValueError).
        actually_clone = bool(data) and self._rng.random() < 0.75

        if actually_clone:
            # Copy a random slice of the existing data.
            clone_len = self._choose_block_len(len(data))
            clone_from = self._rng.randint(0, len(data) - clone_len)
            block = data[clone_from:clone_from + clone_len]
        else:
            # Fresh block: 50% random bytes, 50% one repeated byte.
            clone_len = self._choose_block_len(64)  # AFL default cap
            if self._rng.random() < 0.5:
                block = bytes(self._rng.randint(0, 255) for _ in range(clone_len))
            else:
                block = bytes([self._rng.randint(0, 255)] * clone_len)

        # Insert at a random position.
        clone_to = self._rng.randint(0, len(data))
        data[clone_to:clone_to] = block


    def overwrite_bytes(self, data: bytearray) -> None:
        """Overwrite a random block: 75% copy of another block, 25% fill.

        The fill byte is either random or sampled from the data itself.
        """
        if len(data) < 2:
            return

        # Leave at least one byte untouched by the block length.
        max_len = len(data) - 1
        if max_len < 1:
            return

        # Inlined AFL block-length choice.
        if max_len <= 8 or self._rng.random() < 0.75:
            copy_len = self._rng.randint(1, min(8, max_len))
        else:
            copy_len = self._rng.randint(8, max_len)

        try:
            copy_from = self._rng.randint(0, len(data) - copy_len)
            copy_to = self._rng.randint(0, len(data) - copy_len)
        except ValueError:
            return

        if self._rng.random() < 0.75:  # copy an existing block
            if copy_from != copy_to:  # skip the no-op case
                data[copy_to:copy_to+copy_len] = data[copy_from:copy_from+copy_len]
        else:  # fill with a constant byte
            fill_byte = (self._rng.randint(0, 255) if self._rng.random() < 0.5
                        else data[self._rng.randint(0, len(data) - 1)])
            data[copy_to:copy_to+copy_len] = bytes([fill_byte] * copy_len)

    def overwrite_with_extra(self, msg: bytearray) -> None:
        """Overwrite part of *msg* with a random dictionary (extras) entry."""
        if not self.extras and not self.a_extras:
            return  # no extras available

        # Prefer whichever list exists; flip a coin when both do.
        use_a_extras = not self.extras or (self.a_extras and self._rng.random() < 0.5)

        extra_list = self.a_extras if use_a_extras else self.extras
        extra = self._rng.choice(extra_list)
        extra_len = extra.len

        if extra_len > len(msg):
            return  # entry too long to fit

        insert_at = self._rng.randint(0, len(msg) - extra_len)
        msg[insert_at:insert_at + extra_len] = extra.data

    def insert_with_extra(self, msg: bytearray) -> None:
        """Insert a random dictionary (extras) entry into *msg*."""
        MAX_FILE = 1024 * 1024  # overall message size cap (1 MB)
        if not self.extras and not self.a_extras:
            return  # no extras available

        use_a_extras = not self.extras or (self.a_extras and self._rng.random() < 0.5)

        extra_list = self.a_extras if use_a_extras else self.extras
        extra = self._rng.choice(extra_list)
        extra_len = extra.len

        if len(msg) + extra_len >= MAX_FILE:
            return  # would exceed the size cap

        insert_at = self._rng.randint(0, len(msg))
        msg[insert_at:insert_at] = extra.data

    def overwrite_with_region(self, messages: List[bytearray], msg_idx: int) -> None:
        """Replace messages[msg_idx] with a deep copy of another message."""
        if len(messages) < 2:
            return  # fix: with one message the index search never terminates
        while True:
            other_msg_idx = self._rng.randint(0, len(messages) - 1)
            if other_msg_idx != msg_idx:
                break

        messages[msg_idx] = copy.deepcopy(messages[other_msg_idx])

    def insert_with_region(self, messages: List[bytearray], msg_idx: int) -> None:
        """Insert a deep copy of another message *before* messages[msg_idx]."""
        if len(messages) < 2:
            return  # fix: with one message the index search never terminates
        while True:
            other_msg_idx = self._rng.randint(0, len(messages) - 1)
            if other_msg_idx != msg_idx:
                break
        messages.insert(msg_idx, copy.deepcopy(messages[other_msg_idx]))


    def insert_with_region2(self, messages: List[bytearray], msg_idx: int) -> None:
        """Insert a deep copy of another message *after* messages[msg_idx]."""
        if len(messages) < 2:
            return  # fix: with one message the index search never terminates
        while True:
            other_msg_idx = self._rng.randint(0, len(messages) - 1)
            if other_msg_idx != msg_idx:
                break
        messages.insert(msg_idx + 1, copy.deepcopy(messages[other_msg_idx]))


    def duplicate_region(self, messages: List[bytearray], msg_idx: int) -> None:
        """Duplicate messages[msg_idx] in place (copy inserted before it)."""
        messages.insert(msg_idx, copy.deepcopy(messages[msg_idx]))






from scapy.all import *
import glob,os,pickle

def str2list(lstr):
    """Convert a stringified list like "['a', 'b']" back into a list of strings."""
    inner = lstr.strip('[]')
    # Split on the ", " separator and drop the surrounding quotes.
    return [part.strip("''") for part in inner.split(', ')]

def tuple_str2list(db_str):
    """Convert a stringified tuple like "('a', 'b')" into a list of strings."""
    # Drop the parentheses, split on ", ", then strip the quotes.
    parts = db_str.strip("()").split(', ')
    return [p.strip("''") for p in parts]

def reassemble_udp_packets(packets):
    """Reassemble IP-fragmented UDP datagrams destined to port 500 (IKE).

    Walks a scapy packet list, merges consecutive fragments of each
    UDP/500 packet back into one IP/UDP packet, and de-duplicates by IP id.

    NOTE(review): the fragment walk assumes the remaining fragments appear
    in order immediately after the first one; on a truncated or reordered
    capture the inner loop can index past the end of *packets*
    (IndexError) — confirm inputs are complete ISAKMP captures.

    Returns a list of reassembled scapy packets plus any UDP/500 packets
    missed during the fragment scan.
    """
    reassembled_packets = []
    ids = []  # IP ids already processed, used to skip duplicates
    i = -1
    while i < len(packets)-1:
        i += 1
        if IP in packets[i] and UDP in packets[i] and packets[i][UDP].dport == 500:
            ip = packets[i][IP]
            if ip.id in ids: # skip repeated ip packet
                continue
            ids.append(ip.id)
            udp = packets[i][UDP]
            # Rebuild a fresh IP/UDP header pair; payload is appended below.
            reassembled_packet = IP(src=ip.src, dst=ip.dst) / UDP(sport=udp.sport, dport=udp.dport)
            udp_payload = raw(udp.payload)
            offset = len(ip.payload)
            
            # Collect follow-up fragments while the More-Fragments flag is set;
            # only fragments whose offset continues the payload are appended.
            while ip.flags == 'MF':
                i += 1
                ip = packets[i][IP]
                if ip.frag * 8 == offset:
                    udp_payload += raw(ip.payload)
                    offset += len(ip.payload)
                
            reassembled_packet /= udp_payload
            reassembled_packets.append(reassembled_packet)
    
    # Second pass: pick up UDP/500 packets the fragment scan skipped over.
    for i in range(len(packets)):
        if IP in packets[i] and UDP in packets[i] and packets[i][UDP].dport == 500:
            ip = packets[i][IP]
            if ip.id in ids: # skip repeated ip packet
                continue
            ids.append(ip.id) 
            reassembled_packets.append(packets[i])
        
    return reassembled_packets


def red_dot(dir1, version, happy_flow=None):
    """
    Color the happy-flow transitions red in every .dot file under *dir1*.

    Loads the pickled automaton ``learned_model.pkl`` from *dir1*, replays
    *happy_flow* through it to collect the visited state ids, then rewrites
    each ``*.dot`` file as ``*_kp.dot`` with those transitions marked
    ``color = red``.

    Args:
        dir1: directory containing the .dot files and learned_model.pkl.
        version: 'v1' selects the IKEv1 default flow, anything else IKEv2.
        happy_flow: optional explicit list of input letters; defaults per
            *version*.
    """
    if happy_flow is None:  # fix: `is None`, not `== None`
        if version == 'v1':
            # (A duplicated, immediately-overwritten variant of this list
            # using "quick_mode_1_with_group" was removed as dead code.)
            happy_flow = ["main_mode_1", "main_mode_2", "main_mode_3", "quick_mode_1", "quick_mode_2", "delete_ESP","delete_IKE"]
        else:
            happy_flow = ["SAINIT_SA-KE-NONCE", "AUTH_IDi-AUTH-SA-TSi-TSr", "CHILDSA_SA-NONCE-TSi-TSr", "CHILDSA_RekeyIKE-KE-NONCE",  "CHILDSA_RekeySA-SA-NONCE-TSi-TSr", "INFO_DelChild", "INFO_DelIKE"]

    states = []
    paths = glob.glob(f"{dir1}/*.dot")

    model_path = os.path.join(dir1, 'learned_model.pkl')

    with open(model_path, 'rb') as f:
        learned_model = pickle.load(f)

    print(learned_model)

    # Replay the happy flow, recording every visited state id.
    states.append(learned_model.initial_state.state_id)
    for letter in happy_flow:
        learned_model.step(letter)
        states.append(learned_model.current_state.state_id)

    print(states)

    for path in paths:
        # Skip files this function already produced ("..._kp.dot").
        if os.path.basename(path)[-6:-4] == 'kp':
            continue

        save_path = path[:-4] + '_kp.dot'
        with open(path, 'r') as f:
            dot = f.read()
        # Mark each consecutive state pair red, unless already marked.
        for idx in range(len(states) - 1):
            s0 = states[idx]
            s1 = states[idx+1]
            str1 = f"{s0} -> {s1}  ["
            str2 = f"{s0} -> {s1}  [color = red,"
            if str2 not in dot:
                dot = dot.replace(str1, str2)

        with open(save_path, 'w') as f:
            f.write(dot)

              
def simplfy_dot(dot_file: str) -> None:
    """
    Post-process a learned-model .dot file into two sibling files.

    Writes ``<name>_smv.dot`` (a verbatim copy of the input) and
    ``<name>_simplfy.dot`` where all parallel edges between the same pair
    of states are merged into a single edge whose label lists every
    original label, and ``splines="line"`` is forced for straight edges.

    Args:
        dot_file: path to the input .dot file.
    """
    with open(dot_file, "r") as f:
        lines = f.readlines()

    dot = ''
    transs = []  # "s0 -> s1 " prefixes already merged
    for line in lines:
        if 'digraph' in line:
            # Force straight edges in the simplified rendering.
            dot += f'{line}splines="line";\n'
        elif ('->' not in line) or ('start' in line):
            # Non-edge lines (and the start marker) pass through unchanged.
            dot += line
        else:
            trans = line.split('[')[0]
            if trans in transs:
                continue  # this state pair was already merged
            transs.append(trans)
            # Merge the labels of every edge sharing this state pair.
            new_line = trans + '[label="'
            for l in lines:
                if trans in l:
                    new_line += (l.split('label="')[1].split('"]')[0] + '\n')
            new_line += '"];\n'
            dot += new_line

    base = dot_file.split('.dot')[0]
    with open(base + "_smv.dot", "w+") as f:
        # Fix: write via writelines instead of a side-effect list comprehension.
        f.writelines(lines)

    with open(base + "_simplfy.dot", "w+") as f:
        f.write(dot)