# #!/usr/bin/env python3
# # -*- coding: utf-8 -*-
#
# """
# tcp_reassembly_http_chunked_state_machine.py
#
# 示例：对 PCAP 做 TCP 重组 + HTTP/1.1 解析 + Chunked (流式状态机) + GZIP 解压，
# 可避免多次回退导致 chunk-size 行混入 body 的问题。
#
# pip install scapy
#
# 核心思路：
#   1) TCP 重组：简单按 seq 拼接，处理轻度乱序。
#   2) HTTP 解析：对 request/response 头部与部分字段做解析。
#   3) 若 response header 声明 chunked，则使用 ChunkedParser（流式状态机），
#      每次有新数据就喂给 parser；parser 内部保留对 chunk-size、chunk-data、trailer 的解析状态，
#      不会在不够读时把 chunk-size 行又回退到 buffer，避免 "1f5a\r\n" 被多次解析而混入 body。
#   4) 若 Content-Encoding: gzip，则对得到的 body 做 gzip.decompress()。
#
# 注意：
#   - 仍是演示性质，若遇到极端的 TCP 重传、严重丢包等，可能还不足以与 Wireshark匹敌。
#   - 对 HTTPS 加密流量无能为力，需要先解密。
#   - 如果服务器还使用 deflate/br 等压缩，需要再做对应处理。
# """
#
# import re
# import gzip
# from collections import defaultdict
#
# from scapy.all import rdpcap, IP, TCP, Raw
#
#
# ###############################################################################
# # Regexes to recognize HTTP request lines and response status lines
# ###############################################################################
# REQUEST_LINE_RE = re.compile(rb"^(GET|POST|PUT|DELETE|OPTIONS|HEAD|PATCH)\s")
# RESPONSE_LINE_RE = re.compile(rb"^HTTP/\d\.\d\s\d{3}")
#
# CHUNK_SIZE_RE = re.compile(rb"^([0-9A-Fa-f]+)(;.*)?$")  # chunk-size line
#
# ###############################################################################
# # Session bookkeeping structure
# ###############################################################################
# # sessions[(ipA, portA, ipB, portB)] = {
# #   'client_ip': ...,
# #   'server_ip': ...,
# #   'c2s': {
# #       'segments': { seq -> payload },
# #       'next_seq': None,
# #       'reassembled': b'',
# #       'requests': []
# #   },
# #   's2c': {
# #       'segments': {},
# #       'next_seq': None,
# #       'reassembled': b'',
# #       'responses': []
# #   }
# # }
# # NOTE(review): get_session_key actually returns a nested
# # ((ip, port), (ip, port)) tuple, not the flat 4-tuple sketched above.
# ###############################################################################
# sessions = defaultdict(lambda: {
#     'client_ip': None,
#     'server_ip': None,
#     'c2s': {
#         'segments': {},
#         'next_seq': None,
#         'reassembled': b'',
#         'requests': []
#     },
#     's2c': {
#         'segments': {},
#         'next_seq': None,
#         'reassembled': b'',
#         'responses': []
#     }
# })
#
#
# ###############################################################################
# # 1. TCP session key
# ###############################################################################
# def get_session_key(ip_src, sport, ip_dst, dport):
#     # Sort the two (ip, port) endpoints so both directions of the same
#     # flow map to one dictionary key.
#     a = (ip_src, sport)
#     b = (ip_dst, dport)
#     return (a, b) if a < b else (b, a)
#
#
# ###############################################################################
# # 2. TCP reassembly (simplistic)
# ###############################################################################
# def store_tcp_segment(direction, seq, payload):
#     # Store one TCP segment keyed by sequence number, then greedily splice
#     # every contiguous segment onto direction['reassembled'].
#     segs = direction['segments']
#     if seq in segs:
#         # Duplicate segment
#         # NOTE(review): a retransmission with a different payload silently
#         # overwrites the stored one below; overlapping segments are not handled.
#         pass
#     segs[seq] = payload
#
#     if direction['next_seq'] is None:
#         # NOTE(review): assumes the first segment seen for this direction is
#         # the start of the stream — wrong if the capture missed the beginning.
#         direction['next_seq'] = seq
#
#     reassembled = direction['reassembled']
#     nxt = direction['next_seq']
#
#     # Append segments in sequence order
#     while nxt in segs:
#         pay = segs[nxt]
#         reassembled += pay
#         del segs[nxt]
#         nxt += len(pay)
#
#     direction['reassembled'] = reassembled
#     direction['next_seq'] = nxt
#
#
# ###############################################################################
# # 3. Streaming chunked parser: ChunkedParser (state machine)
# ###############################################################################
# class ChunkedParser:
#     """
#     A streaming chunked-transfer state machine:
#       - Parses each chunk-size line, reads the chunk data, repeats until the
#         last chunk (size == 0), then parses the trailer.
#       - When data runs short it simply pauses; feed more bytes later to
#         continue — chunk-size lines are never pushed back into the buffer,
#         so they cannot be re-parsed and leak into the body.
#       - Final result is (collected_body, trailer_str).
#     """
#     STATE_CHUNK_SIZE = 1
#     STATE_CHUNK_DATA = 2
#     STATE_CHUNK_DATA_CRLF = 3
#     STATE_LAST_CHUNK_TRAILER = 4
#     STATE_DONE = 5
#
#     def __init__(self):
#         self.state = self.STATE_CHUNK_SIZE
#         self.pending_buffer = b""  # bytes received but not yet consumed
#         self.current_chunk_size = 0
#         self.bytes_needed = 0
#         self.collected_body = bytearray()
#         self.trailer_str = ""
#
#     def feed_data(self, new_data: bytes):
#         """
#         Feed newly arrived TCP bytes and loop the state machine until it can
#         make no further progress with the buffered data.
#         """
#         self.pending_buffer += new_data
#         while True:
#             if self.state == self.STATE_CHUNK_SIZE:
#                 # Try to read one chunk-size line
#                 line_end = self.pending_buffer.find(b"\r\n")
#                 if line_end == -1:
#                     # Less than a full line buffered
#                     break
#                 line = self.pending_buffer[:line_end]
#                 self.pending_buffer = self.pending_buffer[line_end+2:]
#                 m = CHUNK_SIZE_RE.match(line)
#                 if not m:
#                     # Malformed chunk-size line
#                     raise ValueError(f"chunk-size 行不匹配: {line}")
#                 hex_size_str = m.group(1)
#                 chunk_size = int(hex_size_str, 16)
#                 if chunk_size == 0:
#                     # last-chunk => switch to STATE_LAST_CHUNK_TRAILER
#                     self.state = self.STATE_LAST_CHUNK_TRAILER
#                 else:
#                     self.current_chunk_size = chunk_size
#                     self.state = self.STATE_CHUNK_DATA
#                     self.bytes_needed = chunk_size
#
#             elif self.state == self.STATE_CHUNK_DATA:
#                 # Need chunk_size bytes of data
#                 if len(self.pending_buffer) < self.bytes_needed:
#                     # Not enough chunk-data yet
#                     break
#                 chunk_data = self.pending_buffer[:self.bytes_needed]
#                 self.pending_buffer = self.pending_buffer[self.bytes_needed:]
#                 self.collected_body.extend(chunk_data)
#                 # The data must be followed by \r\n
#                 self.state = self.STATE_CHUNK_DATA_CRLF
#
#             elif self.state == self.STATE_CHUNK_DATA_CRLF:
#                 # Need to see the 2-byte "\r\n"
#                 if len(self.pending_buffer) < 2:
#                     break
#                 if self.pending_buffer[:2] != b"\r\n":
#                     raise ValueError("chunk-data后缺少 \\r\\n")
#                 self.pending_buffer = self.pending_buffer[2:]
#                 # On to the next chunk
#                 self.state = self.STATE_CHUNK_SIZE
#
#             elif self.state == self.STATE_LAST_CHUNK_TRAILER:
#                 # Keep reading trailer lines until the empty line
#                 line_end = self.pending_buffer.find(b"\r\n")
#                 if line_end == -1:
#                     # Not enough data yet
#                     break
#                 trailer_line = self.pending_buffer[:line_end]
#                 self.pending_buffer = self.pending_buffer[line_end+2:]
#                 if trailer_line == b'':
#                     # Empty line: the trailer section is finished
#                     self.state = self.STATE_DONE
#                     break
#                 else:
#                     # Collect the trailer line
#                     self.trailer_str += trailer_line.decode("utf-8", errors="ignore") + "\r\n"
#
#             elif self.state == self.STATE_DONE:
#                 # Chunked body already complete
#                 # NOTE(review): bytes left in pending_buffer at this point
#                 # (e.g. the start of a pipelined next message) are never
#                 # handed back to the caller.
#                 break
#             else:
#                 # Unknown state
#                 break
#
#     def is_done(self):
#         return self.state == self.STATE_DONE
#
#     def get_result(self):
#         """
#         Return (body_bytes, trailer_str) if parsing is complete, else None.
#         """
#         if self.state == self.STATE_DONE:
#             return (bytes(self.collected_body), self.trailer_str)
#         else:
#             return None
#
#
# ###############################################################################
# # 4. HTTP message structure & parsing state
# ###############################################################################
# class HttpMessageParser:
#     """
#     One HttpMessageParser per direction (request/response):
#       - reads the header first,
#       - creates a ChunkedParser if the body is chunked,
#       - otherwise reads the body by Content-Length,
#       - and finally produces one complete HTTP message.
#     """
#     STATE_HEADER = 1
#     STATE_BODY = 2
#     STATE_DONE = 3
#
#     def __init__(self, is_request=True):
#         self.is_request = is_request
#         self.state = self.STATE_HEADER
#         self.header_buffer = bytearray()
#         self.headers_parsed = False
#
#         # Parsed results
#         self.start_line = b""
#         self.header_lines = []
#         self.transfer_encoding = None
#         self.content_length = None
#         self.method_is_head = False
#         self.no_body_status = False
#
#         self.body_collector = bytearray()  # used for content-length bodies
#         self.body_bytes_needed = 0
#
#         self.chunked_parser = None
#
#         self.complete_message = None
#
#     def feed_data(self, data: bytes):
#         """
#         Feed incremental data and try to parse header/body. One call may yield
#         a complete HTTP message, or several calls may be needed.
#         When a full message is parsed it is stored in self.complete_message and
#         the parser resets so the next message can be parsed.
#         NOTE(review): if a second message completes inside the same call before
#         the caller pops the first, self.complete_message is overwritten and the
#         first message is lost.
#         """
#         pos = 0
#         length = len(data)
#         while pos < length:
#             if self.state == self.STATE_HEADER:
#                 # Accumulate header bytes one at a time
#                 self.header_buffer.append(data[pos])
#                 pos += 1
#
#                 # Check whether \r\n\r\n has appeared
#                 if len(self.header_buffer) >= 4 and self.header_buffer[-4:] == b"\r\n\r\n":
#                     # Parse the collected headers
#                     self._parse_header()
#                     if self.state == self.STATE_DONE:
#                         # No body expected, e.g. HEAD/204/304
#                         self._finish_message()
#                     else:
#                         # Move on to the BODY phase
#                         pass
#
#             elif self.state == self.STATE_BODY:
#                 if self.transfer_encoding == b"chunked":
#                     # Hand the rest of the data to the chunked_parser
#                     need_feed = data[pos:]
#                     pos = length
#                     self.chunked_parser.feed_data(need_feed)
#                     if self.chunked_parser.is_done():
#                         # Body is complete
#                         body, trailer = self.chunked_parser.get_result()
#                         # Assemble the full message
#                         self._finish_message(body, trailer)
#                 elif self.content_length is not None:
#                     # Plain content-length body
#                     can_take = min(self.body_bytes_needed, length - pos)
#                     self.body_collector.extend(data[pos: pos+can_take])
#                     pos += can_take
#                     self.body_bytes_needed -= can_take
#                     if self.body_bytes_needed == 0:
#                         # Body fully read
#                         self._finish_message(self.body_collector)
#                 else:
#                     # No body
#                     # This branch should be unreachable
#                     pos = length
#             else:
#                 # state == STATE_DONE: the previous message just finished, but
#                 # another message may follow; reset and parse the next one.
#                 self._reset()
#                 continue
#         # end while pos < length
#
#     def has_complete_message(self):
#         return (self.complete_message is not None)
#
#     def pop_complete_message(self):
#         """Return the parsed HTTP message and clear complete_message."""
#         msg = self.complete_message
#         self.complete_message = None
#         return msg
#
#     def _reset(self):
#         # Restore all per-message state so the next message can be parsed.
#         self.state = self.STATE_HEADER
#         self.header_buffer = bytearray()
#         self.headers_parsed = False
#         self.start_line = b""
#         self.header_lines = []
#         self.transfer_encoding = None
#         self.content_length = None
#         self.method_is_head = False
#         self.no_body_status = False
#         self.body_collector = bytearray()
#         self.body_bytes_needed = 0
#         self.chunked_parser = None
#
#     def _parse_header(self):
#         # header_end = len(self.header_buffer)-4
#         header_part = self.header_buffer[:-4]  # strip the trailing \r\n\r\n
#         lines = header_part.split(b"\r\n")
#         self.start_line = lines[0]
#         self.header_lines = lines[1:]
#
#         # Detect HEAD requests / no-body response statuses
#         if self.is_request:
#             # Check for the HEAD method
#             method = self.start_line.split(b" ",1)[0].upper()
#             if method == b"HEAD":
#                 self.method_is_head = True
#         else:
#             # Response: inspect the status code
#             parts = self.start_line.split(b" ")
#             if len(parts) >= 2:
#                 try:
#                     code = int(parts[1])
#                     if 100 <= code < 200 or code in (204,304):
#                         self.no_body_status = True
#                 except:
#                     # NOTE(review): bare except hides unrelated errors;
#                     # `except ValueError` would be safer.
#                     pass
#
#         # Parse individual header fields
#         for hl in self.header_lines:
#             lower_hl = hl.lower()
#             if lower_hl.startswith(b"content-length:"):
#                 val = hl.split(b":",1)[1].strip()
#                 try:
#                     self.content_length = int(val)
#                 except:
#                     # NOTE(review): bare except — see above.
#                     self.content_length = None
#             elif lower_hl.startswith(b"transfer-encoding:"):
#                 # chunked transfer coding
#                 if b"chunked" in lower_hl:
#                     self.transfer_encoding = b"chunked"
#
#         # Decide whether there is a body at all
#         if self.method_is_head or self.no_body_status:
#             # Finished immediately
#             self.state = HttpMessageParser.STATE_DONE
#             return
#         # Otherwise enter the body phase
#         if self.transfer_encoding == b"chunked":
#             self.chunked_parser = ChunkedParser()
#             self.state = self.STATE_BODY
#         elif self.content_length is not None:
#             self.body_bytes_needed = self.content_length
#             self.state = self.STATE_BODY
#         else:
#             # Neither content-length nor chunked => treat as no body
#             self.state = HttpMessageParser.STATE_DONE
#
#     def _finish_message(self, body=None, trailer=None):
#         """
#         Assemble the complete message (header + body + trailer).
#         """
#         if body is None:
#             body = b""
#         if trailer is None:
#             trailer = ""
#
#         header_bytes = self.header_buffer  # includes the headers + \r\n\r\n
#         message_bytes = bytes(header_bytes) + body
#         # Append the trailer too, if there is one:
#         if trailer:
#             message_bytes += b"\r\n" + trailer.encode('utf-8', errors='ignore') + b"\r\n"
#         self.complete_message = message_bytes
#         self.state = HttpMessageParser.STATE_DONE
#
#
# ###############################################################################
# # 5. Per-direction HTTP parser wrapper over the TCP stream
# ###############################################################################
# class HttpParserForDirection:
#     """
#     Wraps parsing of *multiple* HTTP messages:
#       - One TCP stream may carry several HTTP messages (e.g. pipelining or
#         consecutive keep-alive requests).
#       - HttpMessageParser is used in a loop: once a message completes, a fresh
#         parser is created for the next one.
#     """
#     def __init__(self, is_request=True):
#         self.is_request = is_request
#         self.current_parser = HttpMessageParser(is_request=is_request)
#         self.messages = []
#
#     def feed(self, data: bytes):
#         # Keep feeding data to current_parser; whenever it finishes a message,
#         # store it in self.messages and start a new parser.
#         pos = 0
#         length = len(data)
#         while pos < length:
#             chunk = data[pos:]
#             before_len = len(self.current_parser.header_buffer) + (len(self.current_parser.body_collector) if self.current_parser.body_collector else 0)
#             self.current_parser.feed_data(chunk)
#             after_len = len(self.current_parser.header_buffer) + (len(self.current_parser.body_collector) if self.current_parser.body_collector else 0)
#             consumed = (before_len + len(chunk)) - after_len  # rough estimate of bytes consumed
#             # NOTE(review): `consumed` is computed but never used.
#
#             # We do not track pos precisely; as a simplification just set
#             # pos = length and exit, since the parser processes incrementally —
#             # so the outer while loop effectively runs once.
#             pos = length
#
#             # Store any finished message
#             while self.current_parser.has_complete_message():
#                 msg_bytes = self.current_parser.pop_complete_message()
#                 self.messages.append(msg_bytes)
#                 # There may be another message to parse next
#                 # NOTE(review): replacing the parser discards any bytes the old
#                 # parser had already buffered beyond the finished message.
#                 self.current_parser = HttpMessageParser(is_request=self.is_request)
#
#
# ###############################################################################
# # 6. Process the PCAP: TCP reassembly + HTTP parsing (chunked state machine)
# ###############################################################################
# def process_pcap(file_path):
#     # Read every packet, reassemble each direction of each TCP session, then
#     # run the HTTP parsers over the reassembled byte streams.
#     pkts = rdpcap(file_path)
#
#     for pkt in pkts:
#         if not (IP in pkt and TCP in pkt):
#             continue
#         if not pkt.haslayer(Raw):
#             continue
#
#         ip_src = pkt[IP].src
#         ip_dst = pkt[IP].dst
#         sport = pkt[TCP].sport
#         dport = pkt[TCP].dport
#         seq = pkt[TCP].seq
#         payload = bytes(pkt[Raw].load)
#
#         session_key = get_session_key(ip_src, sport, ip_dst, dport)
#         sd = sessions[session_key]
#
#         # If client/server roles are not yet known, infer them from the payload
#         if sd['client_ip'] is None or sd['server_ip'] is None:
#             if REQUEST_LINE_RE.match(payload.lstrip()):
#                 sd['client_ip'] = ip_src
#                 sd['server_ip'] = ip_dst
#             elif RESPONSE_LINE_RE.match(payload.lstrip()):
#                 sd['client_ip'] = ip_dst
#                 sd['server_ip'] = ip_src
#
#         # Decide the direction
#         # NOTE(review): if the roles were never inferred, client_ip is still
#         # None and every packet falls into the s2c branch.
#         if ip_src == sd['client_ip']:
#             direction = sd['c2s']
#             is_request = True
#         else:
#             direction = sd['s2c']
#             is_request = False
#         # NOTE(review): is_request is assigned but never used here.
#
#         store_tcp_segment(direction, seq, payload)
#
#     # After all packets are stored, each direction's 'reassembled' holds the
#     # full TCP byte stream; now run HTTP parsing over it.
#     for k, sd in sessions.items():
#         # Attach one HTTP parser to c2s / s2c respectively
#         sd['c2s']['http_parser'] = HttpParserForDirection(is_request=True)
#         sd['s2c']['http_parser'] = HttpParserForDirection(is_request=False)
#
#         sd['c2s']['http_parser'].feed(sd['c2s']['reassembled'])
#         sd['s2c']['http_parser'].feed(sd['s2c']['reassembled'])
#
#         sd['c2s']['requests'] = sd['c2s']['http_parser'].messages
#         sd['s2c']['responses'] = sd['s2c']['http_parser'].messages
#
#
# ###############################################################################
# # 7. Split a complete HTTP message into headers / body
# ###############################################################################
# def extract_headers_and_body(msg_bytes: bytes):
#     # Returns (header_str, body_bytes, header_dict); (None, None, {}) when no
#     # header/body separator is present.
#     header_end = msg_bytes.find(b"\r\n\r\n")
#     if header_end == -1:
#         return None, None, {}
#     header_section = msg_bytes[:header_end]
#     body_bytes = msg_bytes[header_end+4:]
#
#     header_str = header_section.decode("utf-8", errors="replace")
#     lines = header_str.split("\r\n")
#     header_dict = {}
#
#     # First line = lines[0] (the start line); the rest are header fields
#     for l in lines[1:]:
#         if ":" in l:
#             k, v = l.split(":",1)
#             header_dict[k.strip().lower()] = v.strip()
#
#     return header_str, body_bytes, header_dict
#
#
# ###############################################################################
# # 8. Print the results, gunzipping bodies automatically
# ###############################################################################
# def print_http_flows():
#     # Dump every session's paired requests/responses, decompressing
#     # gzip-encoded response bodies on the fly.
#     for (k1,k2), sd in sessions.items():
#         c2s = sd['c2s']
#         s2c = sd['s2c']
#         reqs = c2s['requests']
#         resps = s2c['responses']
#
#         if not reqs and not resps:
#             continue
#
#         print("="*80)
#         print(f"Session: (({k1[0]}:{k1[1]}), ({k2[0]}:{k2[1]}))")
#         print(f"   Client IP: {sd['client_ip']}")
#         print(f"   Server IP: {sd['server_ip']}")
#         print("-"*80)
#
#         # Naive pairing by index
#         # NOTE(review): assumes requests and responses line up one-to-one in
#         # order, which pipelining or missing responses can break.
#         for i, req_data in enumerate(reqs):
#             print("[REQUEST]")
#             print(req_data.decode("utf-8", errors="replace"))
#             print("-"*40)
#             if i < len(resps):
#                 print("[RESPONSE]")
#                 head_str, raw_body, header_dict = extract_headers_and_body(resps[i])
#                 if head_str is None:
#                     print(resps[i].decode("utf-8", errors="replace"))
#                 else:
#                     print(head_str)
#                     print("-- BODY (maybe compressed) --")
#                     encoding = header_dict.get('content-encoding','').lower()
#                     if 'gzip' in encoding:
#                         # gzip decompression
#                         try:
#                             decompressed = gzip.decompress(raw_body)
#                             print(decompressed.decode("utf-8", errors="replace"))
#                         except Exception as e:
#                             print(f"[解压失败] {e}")
#                             print("[DEBUG] raw_body[:80] hex=", raw_body[:80].hex())
#                     else:
#                         print(raw_body.decode("utf-8", errors="replace"))
#             else:
#                 print("RESPONSE NOT FOUND.")
#
#             print("="*80)
#
# ###############################################################################
# # 9. Main entry point
# ###############################################################################
# if __name__ == "__main__":
#     # Change this to the path of your pcap file
#     pcap_file = r"../out/3post.pcap"
#
#     # 1) Process the pcap (TCP reassembly)
#     process_pcap(pcap_file)
#
#     # 2) Print the results (chunked + gzip handling)
#     print_http_flows()
