#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import re
import gzip
import sys
import argparse
from collections import defaultdict

from scapy.all import rdpcap, IP, TCP, Raw

###############################################################################
# Debug output toggle
###############################################################################
DEBUG = True


def debug_print(*args):
    """Print *args prefixed with "[DEBUG]" when the DEBUG flag is enabled."""
    if not DEBUG:
        return
    print("[DEBUG]", *args)


###############################################################################
# 1. Regular expressions
###############################################################################
# Matches an HTTP request start line (method + target + CRLF) at a line start.
REQUEST_LINE_RE = re.compile(rb"^(GET|POST|PUT|DELETE|OPTIONS|HEAD|PATCH)\s[^\r\n]+\r\n", re.MULTILINE)
# Matches an HTTP response status line, e.g. b"HTTP/1.1 200 OK\r\n".
RESPONSE_LINE_RE = re.compile(rb"^HTTP/\d\.\d\s\d{3}\s?[^\r\n]*\r\n", re.MULTILINE)

CHUNK_SIZE_RE = re.compile(rb'^([0-9A-Fa-f]+)(;.*)?$')  # hex chunk-size with optional ";ext" chunk extension

###############################################################################
# 2. Session structures
###############################################################################
# sessions[( (ip,port),(ip,port) )] = {
#   'client_ip': None,        # filled in once a request/response line is seen
#   'server_ip': None,
#   'c2s': {                  # client -> server byte stream
#       'segments': {},       # out-of-order TCP payloads keyed by sequence number
#       'next_seq': None,     # next expected sequence number
#       'reassembled': b'',   # in-order bytes stitched together so far
#   },
#   's2c': {                  # server -> client byte stream (same layout)
#       'segments': {},
#       'next_seq': None,
#       'reassembled': b'',
#   }
# }
sessions = defaultdict(lambda: {
    'client_ip': None,
    'server_ip': None,
    'c2s': {
        'segments': {},
        'next_seq': None,
        'reassembled': b'',
    },
    's2c': {
        'segments': {},
        'next_seq': None,
        'reassembled': b'',
    }
})


###############################################################################
# 3. Session key
###############################################################################
def get_session_key(ip_src, sport, ip_dst, dport):
    """Build an order-independent key for a TCP flow.

    The two (ip, port) endpoints are returned sorted by Python's default
    tuple ordering (ip string first, then port), so both directions of the
    same connection map onto a single session key.
    """
    endpoint_a = (ip_src, sport)
    endpoint_b = (ip_dst, dport)
    return tuple(sorted((endpoint_a, endpoint_b)))


###############################################################################
# 4. TCP reassembly (simplified)
###############################################################################
def store_tcp_segment(direction_dict, seq, payload):
    """Store a TCP payload by sequence number and stitch contiguous data.

    Contiguous segments are appended to ``direction_dict['reassembled']``
    and ``direction_dict['next_seq']`` is advanced past them; out-of-order
    segments wait in ``direction_dict['segments']`` until the gap before
    them is filled.

    Fix over the naive version: a segment lying entirely before next_seq
    (a pure retransmission of already-delivered data) used to be stored in
    'segments' forever — next_seq had already passed it, so it could never
    be consumed.  Such segments are now dropped, and a segment that only
    partially overlaps delivered data is trimmed to its new bytes.

    NOTE(review): 32-bit sequence-number wraparound is still not handled,
    matching the original simplification.
    """
    segs = direction_dict['segments']
    nxt = direction_dict['next_seq']

    if nxt is not None and seq < nxt:
        already = nxt - seq
        if already >= len(payload):
            # Every byte of this segment was delivered before -- nothing new.
            debug_print(f"    [TCP Reassembly] 重复的序列号 {seq}, 跳过.")
            return
        # Keep only the bytes beyond what was already delivered.
        payload = payload[already:]
        seq = nxt

    if seq in segs:
        # Duplicate segment (same starting sequence number already buffered).
        debug_print(f"    [TCP Reassembly] 重复的序列号 {seq}, 跳过.")
        return
    segs[seq] = payload
    debug_print(f"    [TCP Reassembly] 存储序列号 {seq}, 长度 {len(payload)}")

    # Initialize next_seq from the first segment seen in this direction.
    if nxt is None:
        nxt = seq
        debug_print(f"    [TCP Reassembly] 初始化 next_seq 为 {seq}")

    reassembled = direction_dict['reassembled']

    # Repeatedly consume the buffered segment that starts exactly at next_seq.
    while nxt in segs:
        pay = segs.pop(nxt)
        reassembled += pay
        debug_print(f"    [TCP Reassembly] 拼接序列号 {nxt}, 长度 {len(pay)}")
        nxt += len(pay)

    direction_dict['reassembled'] = reassembled
    direction_dict['next_seq'] = nxt
    debug_print(f"    [TCP Reassembly] 更新 next_seq 为 {nxt}")


###############################################################################
# 5. Chunked transfer-encoding parser (streaming)
###############################################################################
class ChunkedParser:
    """Streaming state machine for an HTTP ``Transfer-Encoding: chunked`` body.

    Feed raw bytes via :meth:`feed`; once :meth:`is_done` returns True,
    :meth:`get_result` yields the de-chunked body plus any trailer lines.
    Malformed input moves the parser to STATE_DONE instead of raising.
    """
    # States: chunk-size line -> chunk data -> CRLF after data -> (repeat),
    # then trailer lines after the zero-size chunk, and finally DONE.
    STATE_SIZE = 0
    STATE_DATA = 1
    STATE_DATA_CRLF = 2
    STATE_LASTCHUNK_TRAILER = 3
    STATE_DONE = 4
    def __init__(self):
        self.state = self.STATE_SIZE
        self.buf = b""  # bytes received but not yet consumed
        self.body = bytearray()  # accumulated de-chunked body
        self.trailer_str = ""  # trailer header lines, each terminated with "\r\n"

        self.curr_chunk_size = 0  # declared size of the chunk currently being read
        self.bytes_needed = 0  # chunk-data bytes still outstanding
    def feed(self, data: bytes):
        """Append *data* to the buffer and advance the state machine as far as possible."""
        self.buf += data
        debug_print(f"      [ChunkedParser] 当前状态: {self.state}, 缓冲区长度: {len(self.buf)}")
        while True:
            if self.state == self.STATE_SIZE:
                line_end = self.buf.find(b"\r\n")
                if line_end == -1:
                    break  # need more data for a complete size line
                line = self.buf[:line_end]
                self.buf = self.buf[line_end + 2:]
                debug_print(f"      [ChunkedParser] 解析 chunk size 行: {line}")
                m = CHUNK_SIZE_RE.match(line)
                if not m:
                    debug_print(f"      [ChunkedParser] Chunked size行不匹配: {line}, 停止解析.")
                    self.state = self.STATE_DONE
                    break
                hex_size = m.group(1)
                try:
                    chunk_size = int(hex_size, 16)
                except ValueError:
                    debug_print(f"      [ChunkedParser] 无效的 chunk size: {hex_size}, 停止解析.")
                    self.state = self.STATE_DONE
                    break
                debug_print(f"      [ChunkedParser] chunk_size={chunk_size}")
                if chunk_size == 0:
                    # A zero-size chunk marks the end of the body; trailers may follow.
                    self.state = self.STATE_LASTCHUNK_TRAILER
                else:
                    self.curr_chunk_size = chunk_size
                    self.bytes_needed = chunk_size
                    self.state = self.STATE_DATA

            elif self.state == self.STATE_DATA:
                if len(self.buf) < self.bytes_needed:
                    debug_print(f"      [ChunkedParser] 需要 {self.bytes_needed} 字节的数据, 当前缓冲区只有 {len(self.buf)} 字节.")
                    break
                chunk_data = self.buf[:self.bytes_needed]
                self.buf = self.buf[self.bytes_needed:]
                self.body.extend(chunk_data)
                debug_print(f"      [ChunkedParser] 读取 chunk 数据: {chunk_data}")
                self.state = self.STATE_DATA_CRLF

            elif self.state == self.STATE_DATA_CRLF:
                # Chunk data must be followed by \r\n.
                if len(self.buf) < 2:
                    debug_print("      [ChunkedParser] 等待 CRLF.")
                    break
                if self.buf[:2] != b"\r\n":
                    debug_print("      [ChunkedParser] Chunk-data后无\\r\\n, 停止解析.")
                    self.state = self.STATE_DONE
                    break
                self.buf = self.buf[2:]
                debug_print("      [ChunkedParser] 读取到 CRLF.")
                self.state = self.STATE_SIZE

            elif self.state == self.STATE_LASTCHUNK_TRAILER:
                # Parse optional trailer headers after the final chunk.
                line_end = self.buf.find(b"\r\n")
                if line_end == -1:
                    break
                trailer_line = self.buf[:line_end]
                self.buf = self.buf[line_end + 2:]
                debug_print(f"      [ChunkedParser] 解析 trailer 行: {trailer_line}")
                if trailer_line == b'':
                    # Empty line: the chunked body is finished.
                    self.state = self.STATE_DONE
                    debug_print("      [ChunkedParser] 解析完成.")
                    break
                else:
                    self.trailer_str += trailer_line.decode("utf-8", errors="ignore") + "\r\n"
            elif self.state == self.STATE_DONE:
                debug_print("      [ChunkedParser] 状态为 DONE, 结束解析.")
                break
            else:
                debug_print("      [ChunkedParser] 未知状态, 结束解析.")
                break

    def is_done(self):
        """Return True once the parser has reached STATE_DONE (complete or errored)."""
        return self.state == self.STATE_DONE

    def get_result(self):
        """Return (body_bytes, trailer_str) when done, otherwise None."""
        if self.state == self.STATE_DONE:
            return (bytes(self.body), self.trailer_str)
        return None


###############################################################################
# 6. Multi-message HTTP parsing (streaming)
###############################################################################
class HttpStreamParser:
    """
    Parse a reassembled TCP stream that may contain several HTTP
    requests (or responses) back to back, splitting it into the
    individual messages.
    """

    def __init__(self, is_request=True):
        # is_request selects which start-line regex is used (request vs response).
        self.is_request = is_request
        self.messages = []  # parsed messages, each as raw bytes

    def parse_stream(self, data: bytes):
        """
        Run multi-message parsing over the whole of *data*.

        Because of TCP coalescing, several requests/responses may be glued
        together; each one is separated by first parsing its header and
        then deriving the body length.  Returns the list of message bytes.
        """
        pos = 0
        length = len(data)
        debug_print(f"  [HttpStreamParser] 开始解析 {'请求' if self.is_request else '响应'}流, 总长度: {length}")

        while True:
            if pos >= length:
                break

            # Look for the next start line (request line or status line).
            if self.is_request:
                m = REQUEST_LINE_RE.search(data, pos)
            else:
                m = RESPONSE_LINE_RE.search(data, pos)

            if not m:
                # No further start line found; discard the remainder.
                debug_print("  [HttpStreamParser] 未找到下一条首行, 剩余数据丢弃.")
                break

            # next_msg_start is where the matched start line begins.
            next_msg_start = m.start()
            # If m.start() != pos, the bytes in between are invalid data (or
            # leftover body from the previous message); skip over them.
            if next_msg_start != pos:
                skip_bytes = next_msg_start - pos
                debug_print(f"  [HttpStreamParser] 跳过 {skip_bytes} 字节(可能是上条body余留).")
                pos = next_msg_start

            # Parse one complete HTTP message starting at next_msg_start.
            msg, consumed = self._parse_one_http_message(data, pos)
            if not msg:
                # Remaining data is not enough to form a complete message.
                debug_print("  [HttpStreamParser] 余下数据不足以组成一条HTTP报文, 结束.")
                break

            self.messages.append(msg)
            debug_print(f"  [HttpStreamParser] 解析一条完整报文, 消耗字节数: {consumed}")
            pos += consumed

        debug_print(f"  [HttpStreamParser] 解析完成, 共解析出 {len(self.messages)} 条报文.")
        return self.messages

    def _parse_one_http_message(self, data: bytes, start_offset: int):
        """
        Parse one HTTP message at ``data[start_offset:]``; return
        (message_bytes, consumed_byte_count), or (None, 0) when the data
        cannot form a complete message.
        """
        # Locate the end of the header block: \r\n\r\n
        header_end_pos = data.find(b"\r\n\r\n", start_offset)
        if header_end_pos == -1:
            return (None, 0)

        # Header section (includes the start line, excludes the final \r\n\r\n).
        header_part = data[start_offset: header_end_pos]
        # Grab the start line for a quick classification.
        first_line_end = header_part.find(b"\r\n")
        if first_line_end == -1:
            return (None, 0)  # cannot happen in practice; the start line is always found here

        first_line = header_part[:first_line_end]
        debug_print(f"    [HttpStreamParser] 解析首行: {first_line}")
        # Extract content-length / transfer-encoding and related framing info.
        headers_lines = header_part.split(b"\r\n")[1:]  # excluding the start line
        content_length = None
        chunked = False
        is_head_method = False
        no_body_status = False

        # For requests, check whether this is a HEAD request.
        if self.is_request:
            # HEAD?
            if first_line.upper().startswith(b"HEAD "):
                is_head_method = True
                debug_print("    [HttpStreamParser] 这是一个 HEAD 请求.")
        else:
            # Response: 1xx/204/304 status codes carry no body.
            parts = first_line.split()
            if len(parts) >= 2:
                try:
                    code = int(parts[1])
                    if 100 <= code < 200 or code in (204, 304):
                        no_body_status = True
                        debug_print(f"    [HttpStreamParser] 响应状态码 {code} 表示无 body.")
                except ValueError:
                    pass

        # Scan the header lines.
        for hl in headers_lines:
            lower_hl = hl.lower()
            if lower_hl.startswith(b"content-length:"):
                val = hl.split(b":", 1)[1].strip()
                try:
                    content_length = int(val)
                    debug_print(f"    [HttpStreamParser] 解析到 Content-Length: {content_length}")
                except ValueError:
                    content_length = None
            elif lower_hl.startswith(b"transfer-encoding:"):
                if b"chunked" in lower_hl:
                    chunked = True
                    debug_print("    [HttpStreamParser] 解析到 Transfer-Encoding: chunked")

        # HEAD requests and no-body status codes end right after the headers.
        if is_head_method or no_body_status:
            msg_end = header_end_pos + 4
            msg_data = data[start_offset: msg_end]
            consumed_len = msg_end - start_offset
            debug_print(f"    [HttpStreamParser] 这是一个无 body 的报文, 消耗 {consumed_len} 字节.")
            return (msg_data, consumed_len)

        # Otherwise a body must be consumed.
        body_start = header_end_pos + 4
        # Chunked transfer-encoding.
        if chunked:
            # Parse chunks from body_start onward until the chunked body terminates.
            cp = ChunkedParser()
            cp.feed(data[body_start:])
            if cp.is_done():
                result = cp.get_result()
                if result:
                    body, trailer = result
                    # The trailer may span several lines; simply concatenate them.
                    trailer_bytes = b""
                    if trailer:
                        trailer_bytes = b"\r\n" + trailer.encode("utf-8", errors="ignore") + b"\r\n"
                    # Assemble the full message (headers + de-chunked body).
                    msg_data = data[start_offset: body_start] + body + trailer_bytes
                    # Work out how many input bytes were actually consumed.
                    total_body_consumed = len(data[body_start:]) - len(cp.buf)
                    consumed_len = (body_start - start_offset) + total_body_consumed
                    debug_print(f"    [HttpStreamParser] 解析 chunked body 完成, 消耗 {consumed_len} 字节.")
                    return (msg_data, consumed_len)
                else:
                    # No usable result, likely due to a parse error.
                    debug_print("    [HttpStreamParser] ChunkedParser未返回有效结果.")
                    return (None, 0)
            else:
                # The chunked body is incomplete or malformed.
                debug_print("    [HttpStreamParser] chunked body 尚未完成或出现错误.")
                return (None, 0)
        else:
            # Plain Content-Length body (no Content-Length means no body).
            if content_length is not None:
                # Check whether the buffered data covers the whole body.
                end_pos = body_start + content_length
                if end_pos > len(data):
                    debug_print("    [HttpStreamParser] Content-Length 指定的长度不足, 等待更多数据.")
                    return (None, 0)  # incomplete
                msg_data = data[start_offset: end_pos]
                consumed_len = end_pos - start_offset
                debug_print(f"    [HttpStreamParser] 解析到完整 body, 消耗 {consumed_len} 字节.")
                return (msg_data, consumed_len)
            else:
                # No Content-Length: treat the message as having no body.
                msg_end = body_start
                msg_data = data[start_offset: msg_end]
                consumed_len = msg_end - start_offset
                debug_print(f"    [HttpStreamParser] 无 Content-Length, 消耗 {consumed_len} 字节.")
                return (msg_data, consumed_len)


###############################################################################
# 7. Process the PCAP: reassemble TCP, then run multi-message HTTP parsing
###############################################################################
def process_pcap(file_path):
    """Read *file_path*, reassemble each TCP session's two byte streams,
    parse them as HTTP request/response streams and print paired messages.

    Exits the process (status 1) if the PCAP cannot be read.  Results
    accumulate in the module-level ``sessions`` defaultdict.
    """
    try:
        pkts = rdpcap(file_path)
    except Exception as e:
        print(f"Error reading pcap file: {e}")
        sys.exit(1)

    debug_print(f"读入PCAP包数: {len(pkts)}")

    for pkt in pkts:
        # Only TCP packets that actually carry payload are interesting.
        if not (IP in pkt and TCP in pkt):
            continue
        if not pkt.haslayer(Raw):
            continue

        ip_src = pkt[IP].src
        ip_dst = pkt[IP].dst
        sport = pkt[TCP].sport
        dport = pkt[TCP].dport
        seq = pkt[TCP].seq
        payload = bytes(pkt[Raw].load)

        sess_key = get_session_key(ip_src, sport, ip_dst, dport)
        sd = sessions[sess_key]

        # If client/server roles are not identified yet, try to infer them.
        if sd['client_ip'] is None or sd['server_ip'] is None:
            # Look for a request or response start line in the payload.
            if REQUEST_LINE_RE.search(payload):
                sd['client_ip'] = ip_src
                sd['server_ip'] = ip_dst
                debug_print(f"  [Init] 识别到 client_ip={ip_src}, server_ip={ip_dst}")
            elif RESPONSE_LINE_RE.search(payload):
                sd['client_ip'] = ip_dst
                sd['server_ip'] = ip_src
                debug_print(f"  [Init] 识别到 client_ip={ip_dst}, server_ip={ip_src}")
            # Otherwise roles stay unknown; wait for a later packet.

        # Pick the direction this packet belongs to.
        if sd['client_ip'] == ip_src:
            direction = sd['c2s']
            direction_label = "c2s"
        elif sd['server_ip'] == ip_src:
            direction = sd['s2c']
            direction_label = "s2c"
        else:
            # Direction still unknown: guess c2s (it could also be skipped).
            direction = sd['c2s']
            direction_label = "c2s (猜测)"
            debug_print(f"  [Warning] 无法确认方向, 默认将 {ip_src}:{sport} 作为客户端方向.")

        debug_print(f"  [Session] 处理方向: {direction_label}, 序列号: {seq}, 负载长度: {len(payload)}")
        store_tcp_segment(direction, seq, payload)

    # All packets processed; each session now holds the two reassembled
    # streams (c2s/s2c).  Parse each as a series of HTTP messages.
    for (k1, k2), sd in sessions.items():
        c2s_data = sd['c2s']['reassembled']
        s2c_data = sd['s2c']['reassembled']

        # Parse the whole client->server stream as requests...
        c2s_parser = HttpStreamParser(is_request=True)
        c2s_msgs = c2s_parser.parse_stream(c2s_data)

        # ...and the server->client stream as responses.
        s2c_parser = HttpStreamParser(is_request=False)
        s2c_msgs = s2c_parser.parse_stream(s2c_data)

        # Print the session if it produced any requests or responses.
        if c2s_msgs or s2c_msgs:
            print("=" * 80)
            print(f"Session: (({k1[0]}:{k1[1]}), ({k2[0]}:{k2[1]}))")
            print(f"   Client IP: {sd['client_ip']}")
            print(f"   Server IP: {sd['server_ip']}")
            print(f"   c2s_msgs: {len(c2s_msgs)}, s2c_msgs: {len(s2c_msgs)}")
            print("-" * 80)

            # Pair messages positionally, driven by the request count.
            for i, req_data in enumerate(c2s_msgs):
                print("[REQUEST]")
                try:
                    req_str = req_data.decode("utf-8", errors="replace")
                except Exception as e:
                    req_str = f"[解码失败] {e}"
                print(req_str)
                print("-" * 40)
                if i < len(s2c_msgs):
                    print("[RESPONSE]")
                    try:
                        head_str, body_str = decode_http_message(s2c_msgs[i])
                        print(head_str)
                        print("-- BODY --")
                        print(body_str)
                    except Exception as e:
                        print(f"[解码响应失败] {e}")
                        print("-- 原始响应 --")
                        print(s2c_msgs[i].decode("utf-8", errors="replace"))
                else:
                    print("RESPONSE NOT FOUND.")
                print("=" * 80)

            # Print any responses left over beyond the request count.
            if len(s2c_msgs) > len(c2s_msgs):
                for j in range(len(c2s_msgs), len(s2c_msgs)):
                    print("[EXTRA RESPONSE]")
                    try:
                        head_str, body_str = decode_http_message(s2c_msgs[j])
                        print(head_str)
                        print("-- BODY --")
                        print(body_str)
                    except Exception as e:
                        print(f"[解码响应失败] {e}")
                        print("-- 原始响应 --")
                        print(s2c_msgs[j].decode("utf-8", errors="replace"))
                    print("=" * 80)


###############################################################################
# 8. Decode an HTTP message (attempting gzip decompression)
###############################################################################
def decode_http_message(msg_bytes: bytes):
    """Split an HTTP message into header text and body text.

    When the headers advertise ``Content-Encoding: gzip`` the body is
    decompressed before decoding.  Returns ``(header_str, body_str)``.
    """
    sep = msg_bytes.find(b"\r\n\r\n")
    if sep == -1:
        # No header/body separator: treat everything as header text.
        return (msg_bytes.decode("utf-8", errors="replace"), "")

    raw_headers = msg_bytes[:sep]
    raw_body = msg_bytes[sep + 4:]
    header_str = raw_headers.decode("utf-8", errors="replace")

    # Build a lowercase name -> value map of the header fields (start line excluded).
    headers = {}
    for line in raw_headers.split(b"\r\n")[1:]:
        if b":" not in line:
            continue
        name, value = line.split(b":", 1)
        name = name.strip().lower().decode('utf-8', 'replace')
        value = value.strip().decode('utf-8', 'replace')
        headers[name] = value

    # If Content-Encoding mentions gzip, attempt decompression.
    encoding = headers.get('content-encoding', '').lower()
    if 'gzip' in encoding:
        try:
            body_str = gzip.decompress(raw_body).decode("utf-8", errors="replace")
            debug_print("    [decode_http_message] 成功解压 gzip 内容.")
        except Exception as e:
            body_str = f"[解压失败] {e}\n原始:\n{raw_body.decode('utf-8', errors='replace')}"
            debug_print(f"    [decode_http_message] 解压 gzip 失败: {e}")
    else:
        try:
            body_str = raw_body.decode("utf-8", errors="replace")
        except Exception as e:
            body_str = f"[解码失败] {e}\n原始:\n{raw_body.decode('utf-8', errors='ignore')}"
            debug_print(f"    [decode_http_message] 解码 body 失败: {e}")

    return (header_str, body_str)


###############################################################################
# 9. Entry point
###############################################################################
def main():
    """Command-line entry point: parse arguments, set DEBUG, process the PCAP."""
    arg_parser = argparse.ArgumentParser(description="解析PCAP文件中的HTTP会话.")
    arg_parser.add_argument("pcap_file", help="PCAP文件路径")
    arg_parser.add_argument("-d", "--debug", action="store_true", help="启用调试模式")
    options = arg_parser.parse_args()

    # The -d flag overrides the module-level default.
    global DEBUG
    DEBUG = options.debug

    process_pcap(options.pcap_file)


# Script entry point.
if __name__ == "__main__":
    main()
