import re
# Start line of an HTTP request: a known method token followed by whitespace.
REQUEST_LINE_RE = re.compile(rb"^(GET|POST|PUT|DELETE|OPTIONS|HEAD|PATCH)\s")
# Status line of an HTTP response, e.g. b"HTTP/1.1 200 OK".
RESPONSE_LINE_RE = re.compile(rb"^HTTP/\d\.\d\s\d{3}")
# chunk-size line: hex size, optionally followed by ";name=value" chunk extensions.
CHUNK_SIZE_RE = re.compile(rb"^([0-9A-Fa-f]+)(;.*)?$")
###############################################################################
# 3. Streaming chunked-body parser: ChunkedParser (state machine)
###############################################################################
class ChunkedParser:
    """Streaming state machine for HTTP/1.1 "Transfer-Encoding: chunked" bodies.

    Feed raw bytes incrementally with feed_data(); the parser consumes
    chunk-size lines, then chunk data, until the last chunk (size 0), then the
    trailer section.  When the buffered bytes are insufficient it simply
    pauses and resumes on the next feed_data() call.  Once is_done() is true,
    get_result() yields (decoded_body_bytes, trailer_str).
    """

    # chunk-size line: hex size, optionally followed by ";name=value" extensions.
    # Compiled here so the class does not depend on a module-level global.
    _CHUNK_SIZE_RE = re.compile(rb"^([0-9A-Fa-f]+)(;.*)?$")

    STATE_CHUNK_SIZE = 1
    STATE_CHUNK_DATA = 2
    STATE_CHUNK_DATA_CRLF = 3
    STATE_LAST_CHUNK_TRAILER = 4
    STATE_DONE = 5

    def __init__(self):
        self.state = self.STATE_CHUNK_SIZE
        self.pending_buffer = b""         # bytes received but not yet consumed
        self.current_chunk_size = 0       # size of the chunk being read
        self.bytes_needed = 0             # bytes still missing for that chunk
        self.collected_body = bytearray() # decoded body accumulated so far
        self.trailer_str = ""             # trailer lines, joined with CRLF

    def feed_data(self, new_data: bytes):
        """Feed a new slice of the TCP stream and drive the state machine
        until no further progress can be made with the buffered bytes.

        Raises:
            ValueError: on a malformed chunk-size line or a missing CRLF
                after chunk data.
        """
        self.pending_buffer += new_data
        while True:
            if self.state == self.STATE_CHUNK_SIZE:
                # read one chunk-size line
                line_end = self.pending_buffer.find(b"\r\n")
                if line_end == -1:
                    break  # size line incomplete, wait for more data
                line = self.pending_buffer[:line_end]
                self.pending_buffer = self.pending_buffer[line_end + 2:]
                m = self._CHUNK_SIZE_RE.match(line)
                if not m:
                    raise ValueError(f"chunk-size 行不匹配: {line}")
                chunk_size = int(m.group(1), 16)
                if chunk_size == 0:
                    # last-chunk: what follows is the (possibly empty) trailer
                    self.state = self.STATE_LAST_CHUNK_TRAILER
                else:
                    self.current_chunk_size = chunk_size
                    self.bytes_needed = chunk_size
                    self.state = self.STATE_CHUNK_DATA

            elif self.state == self.STATE_CHUNK_DATA:
                if len(self.pending_buffer) < self.bytes_needed:
                    break  # chunk data incomplete, wait for more data
                self.collected_body.extend(self.pending_buffer[:self.bytes_needed])
                self.pending_buffer = self.pending_buffer[self.bytes_needed:]
                # chunk data must be followed by CRLF
                self.state = self.STATE_CHUNK_DATA_CRLF

            elif self.state == self.STATE_CHUNK_DATA_CRLF:
                if len(self.pending_buffer) < 2:
                    break
                if self.pending_buffer[:2] != b"\r\n":
                    raise ValueError("chunk-data后缺少 \\r\\n")
                self.pending_buffer = self.pending_buffer[2:]
                self.state = self.STATE_CHUNK_SIZE  # next chunk

            elif self.state == self.STATE_LAST_CHUNK_TRAILER:
                # read trailer lines until the empty line that ends the message
                line_end = self.pending_buffer.find(b"\r\n")
                if line_end == -1:
                    break
                trailer_line = self.pending_buffer[:line_end]
                self.pending_buffer = self.pending_buffer[line_end + 2:]
                if trailer_line == b'':
                    self.state = self.STATE_DONE
                    break
                self.trailer_str += trailer_line.decode("utf-8", errors="ignore") + "\r\n"

            else:
                # STATE_DONE (or an unknown state): nothing more to parse
                break

    def is_done(self):
        """True once the last chunk and its trailer have been fully parsed."""
        return self.state == self.STATE_DONE

    def get_result(self):
        """Return (body_bytes, trailer_str) if parsing completed, else None."""
        if self.state != self.STATE_DONE:
            return None
        return (bytes(self.collected_body), self.trailer_str)
#
#
###############################################################################
# 4. HTTP message structure & parse state
###############################################################################
class HttpMessageParser:
    """Incremental parser for one direction (requests or responses) of an
    HTTP/1.1 stream.

    Lifecycle per message:
      - accumulate bytes until the header terminator CRLFCRLF,
      - pick the body framing: Transfer-Encoding: chunked, Content-Length,
        or no body at all (HEAD requests, 1xx/204/304 responses),
      - read the body, queue the reconstructed raw message, then reset so the
        next pipelined message on the same connection can be parsed.

    Finished messages are retrieved in FIFO order via has_complete_message()
    / pop_complete_message().
    """

    STATE_HEADER = 1
    STATE_BODY = 2
    STATE_DONE = 3

    def __init__(self, is_request=True):
        self.is_request = is_request
        self.state = self.STATE_HEADER
        self.header_buffer = bytearray()
        self.headers_parsed = False

        # parsed header information
        self.start_line = b""
        self.header_lines = []
        self.transfer_encoding = None      # b"chunked" or None
        self.content_length = None         # int or None
        self.method_is_head = False
        self.no_body_status = False

        self.body_collector = bytearray()  # body bytes for Content-Length framing
        self.body_bytes_needed = 0

        self.chunked_parser = None         # ChunkedParser when chunked framing

        # FIFO of finished raw messages.  A queue (instead of a single slot)
        # keeps earlier messages from being overwritten when several pipelined
        # messages complete inside one feed_data() call.
        self.completed_messages = []
        self.complete_message = None       # legacy single-slot view

    def feed_data(self, data: bytes):
        """Feed incremental bytes; zero or more messages may complete."""
        pos = 0
        length = len(data)
        while pos < length:
            if self.state == self.STATE_HEADER:
                self.header_buffer.append(data[pos])
                pos += 1
                # the header section ends at the first CRLFCRLF
                if len(self.header_buffer) >= 4 and self.header_buffer[-4:] == b"\r\n\r\n":
                    self._parse_header()
                    if self.state == self.STATE_DONE:
                        # body-less message (HEAD, 1xx/204/304, Content-Length: 0, ...)
                        self._finish_message()

            elif self.state == self.STATE_BODY:
                if self.transfer_encoding == b"chunked":
                    self.chunked_parser.feed_data(data[pos:])
                    pos = length
                    if self.chunked_parser.is_done():
                        # Bytes left after the terminating chunk belong to the
                        # next pipelined message; the old code dropped them.
                        leftover = bytes(self.chunked_parser.pending_buffer)
                        body, trailer = self.chunked_parser.get_result()
                        self._finish_message(body, trailer)
                        if leftover:
                            data = leftover
                            length = len(leftover)
                            pos = 0
                elif self.content_length is not None:
                    # plain Content-Length framing
                    can_take = min(self.body_bytes_needed, length - pos)
                    self.body_collector.extend(data[pos:pos + can_take])
                    pos += can_take
                    self.body_bytes_needed -= can_take
                    if self.body_bytes_needed == 0:
                        self._finish_message(self.body_collector)
                else:
                    # unreachable: _parse_header never enters BODY without framing
                    pos = length
            else:
                # STATE_DONE: the previous message just finished; reset and
                # keep parsing the next pipelined message.
                self._reset()

    def has_complete_message(self):
        """True while at least one finished message is queued."""
        return bool(self.completed_messages)

    def pop_complete_message(self):
        """Pop and return the oldest finished raw message, or None."""
        if not self.completed_messages:
            return None
        msg = self.completed_messages.pop(0)
        self.complete_message = self.completed_messages[0] if self.completed_messages else None
        return msg

    def _reset(self):
        """Clear per-message state so the next message can be parsed."""
        self.state = self.STATE_HEADER
        self.header_buffer = bytearray()
        self.headers_parsed = False
        self.start_line = b""
        self.header_lines = []
        self.transfer_encoding = None
        self.content_length = None
        self.method_is_head = False
        self.no_body_status = False
        self.body_collector = bytearray()
        self.body_bytes_needed = 0
        self.chunked_parser = None

    def _parse_header(self):
        """Split the buffered header block and decide the body framing."""
        header_part = self.header_buffer[:-4]  # strip the trailing CRLFCRLF
        lines = header_part.split(b"\r\n")
        self.start_line = lines[0]
        self.header_lines = lines[1:]

        if self.is_request:
            # NOTE(review): flagging HEAD on the *request* side mirrors the
            # original logic; strictly it is the response to HEAD that has no
            # body (RFC 9112) — confirm against the caller's pairing logic.
            method = self.start_line.split(b" ", 1)[0].upper()
            if method == b"HEAD":
                self.method_is_head = True
        else:
            # 1xx, 204 and 304 responses never carry a body
            parts = self.start_line.split(b" ")
            if len(parts) >= 2:
                try:
                    code = int(parts[1])
                except ValueError:
                    pass
                else:
                    if 100 <= code < 200 or code in (204, 304):
                        self.no_body_status = True

        # scan header fields for framing information
        for hl in self.header_lines:
            lower_hl = hl.lower()
            if lower_hl.startswith(b"content-length:"):
                val = hl.split(b":", 1)[1].strip()
                try:
                    self.content_length = int(val)
                except ValueError:
                    self.content_length = None
            elif lower_hl.startswith(b"transfer-encoding:"):
                if b"chunked" in lower_hl:
                    self.transfer_encoding = b"chunked"

        if self.method_is_head or self.no_body_status:
            # no body at all — message is already complete
            self.state = self.STATE_DONE
            return
        if self.transfer_encoding == b"chunked":
            self.chunked_parser = ChunkedParser()
            self.state = self.STATE_BODY
        elif self.content_length is not None and self.content_length > 0:
            self.body_bytes_needed = self.content_length
            self.state = self.STATE_BODY
        else:
            # Neither chunked nor a positive Content-Length -> no body.
            # Content-Length: 0 deliberately lands here: the old code entered
            # STATE_BODY needing 0 bytes and stalled until unrelated extra
            # data happened to arrive.
            self.state = self.STATE_DONE

    def _finish_message(self, body=None, trailer=None):
        """Queue the raw message (headers + body [+ trailer]) and mark DONE."""
        if body is None:
            body = b""
        if trailer is None:
            trailer = ""

        message_bytes = bytes(self.header_buffer) + bytes(body)
        # optionally append the chunked trailer section
        if trailer:
            message_bytes += b"\r\n" + trailer.encode('utf-8', errors='ignore') + b"\r\n"
        self.completed_messages.append(message_bytes)
        self.complete_message = message_bytes  # legacy single-slot view
        self.state = self.STATE_DONE
#
#
###############################################################################
# 5. HttpParserForDirection: multi-message HTTP parsing on one TCP direction
###############################################################################
class HttpParserForDirection:
    """Parser for *multiple* HTTP messages on one direction of a TCP stream
    (pipelined or consecutive requests/responses on a kept-alive connection).

    Finished raw messages accumulate in self.messages.
    """

    def __init__(self, is_request=True):
        self.is_request = is_request
        self.current_parser = HttpMessageParser(is_request=is_request)
        self.messages = []

    def feed(self, data: bytes):
        """Feed raw TCP payload bytes; append every finished message to
        self.messages.

        HttpMessageParser is incremental and resets itself between messages,
        so one feed_data() call followed by a drain loop suffices.  The old
        code estimated a "consumed" byte count it never used, and replaced
        the parser with a fresh instance after every finished message — which
        silently discarded any bytes the parser had already buffered for the
        *next* pipelined message.
        """
        self.current_parser.feed_data(data)
        while self.current_parser.has_complete_message():
            self.messages.append(self.current_parser.pop_complete_message())
#
#
###############################################################################
# 2. TCP reassembly (simplified)
###############################################################################
def store_tcp_segment(direction, seq, payload, time):
    """Record one TCP segment and extend the in-order reassembled stream.

    Args:
        direction: per-direction state dict with keys 'time' (list of
            timestamps), 'segments' (seq -> payload of out-of-order data),
            'next_seq' (next expected sequence number, or None before the
            first segment) and 'reassembled' (bytes delivered so far).
        seq: TCP sequence number of this segment.
        payload: segment payload bytes.
        time: capture timestamp appended to direction['time'].
    """
    direction['time'].append(time)
    segs = direction['segments']

    nxt = direction['next_seq']
    if nxt is None:
        # first segment seen on this direction anchors the stream
        nxt = seq

    # Drop retransmissions of data we already delivered.  The old code only
    # noted duplicates (`pass`) and stored them anyway; a stale segment's seq
    # can never match next_seq again, so it sat in `segs` forever.
    # NOTE(review): a partially-overlapping retransmission (seq < nxt <
    # seq+len) is still stored whole, as before — confirm capture source
    # never produces those.
    if len(payload) == 0 or seq + len(payload) <= nxt:
        direction['next_seq'] = nxt
        return

    segs[seq] = payload

    # splice every contiguous segment onto the reassembled stream
    reassembled = direction['reassembled']
    while nxt in segs:
        pay = segs.pop(nxt)
        reassembled += pay
        nxt += len(pay)

    direction['reassembled'] = reassembled
    direction['next_seq'] = nxt