import os
from collections import defaultdict
from typing import Dict, List, Optional, Union

import numpy as np
import pandas as pd
import torch
from loguru import logger
from scapy.all import rdpcap
from scapy.layers.inet import IP, TCP, UDP
from scapy.packet import Packet

from ShieldNet.ModelCreator import ModelLoader


class PCAPProcessor:
    """PCAP traffic processor: groups packets into flows, extracts per-flow
    features and predicts attack traffic with a pre-trained model."""

    def __init__(
        self,
        model_path: str,
        window_size: int = 10,
        output_dir: str = "results",
        attack_threshold: float = 0.5,
    ) -> None:
        """Initialize the processor.

        Args:
            model_path: Path to the serialized model file.
            window_size: Number of time steps per prediction window.
            output_dir: Directory for result CSVs (created if absent).
            attack_threshold: Probability threshold (0-1) above which a
                flow is flagged as an attack.
        """
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Reusing the same quote character inside an f-string replacement
        # field is a SyntaxError before Python 3.12 — pick the label first.
        device_name = "NVIDIA GPU" if self.device.type == "cuda" else "CPU"
        logger.info(f"训练使用设备: {device_name}")

        # Use a distinct name for the loader instead of shadowing the
        # `model_path` argument with an unrelated object.
        loader = ModelLoader(model_path=model_path)
        result = loader.load()
        # NOTE(review): assumes load() returns (feature_names, nn.Module,
        # fitted scaler) in that order — confirm against ModelLoader.
        self.required_features = result[0]
        self.model: torch.nn.Module = result[1].eval()
        self.scaler = result[2]
        self.window_size = window_size
        self.output_dir = output_dir
        self.attack_threshold = attack_threshold
        os.makedirs(output_dir, exist_ok=True)

    @staticmethod
    def _generate_flow_key(
        pkt: IP | Packet
    ) -> tuple[int | str, ...] | None:
        """Build a direction-independent flow identifier for one packet.

        Endpoints are sorted as (ip, port) pairs so that A->B and B->A
        packets of the same conversation map to one key. (Sorting the IPs
        while keeping ports in per-packet order — as the code previously
        did — split every bidirectional flow into two keys.)

        Returns:
            (ip_lo, ip_hi, proto, port_lo, port_hi), or None if the
            packet cannot be parsed.
        """
        try:
            src = str(pkt[IP].src)
            dst = str(pkt[IP].dst)
            proto = int(pkt[IP].proto)

            sport = dport = 0
            if TCP in pkt:
                sport = int(pkt[TCP].sport)
                dport = int(pkt[TCP].dport)
            elif UDP in pkt:
                sport = int(pkt[UDP].sport)
                dport = int(pkt[UDP].dport)

            # Sort whole endpoints so each port stays with its IP.
            (ip_a, port_a), (ip_b, port_b) = sorted([(src, sport), (dst, dport)])
            return (ip_a, ip_b, proto, port_a, port_b)
        except Exception as ex:
            logger.error(f"包解析错误: {ex}")
            return None

    def _predict_single_flow(
        self,
        features: Dict[str, Union[int, float]]
    ) -> float:
        """Predict the attack probability of a single flow.

        The one feature vector is tiled to (1, window_size, n_features) so
        it matches the model's windowed input shape.

        Returns:
            Sigmoid probability in [0, 1].
        """
        df = pd.DataFrame([features])
        X = self._preprocess_features(df)

        # Repeat the single sample across the time axis.
        X_seq = np.tile(X, (self.window_size, 1, 1))
        X_seq = np.transpose(X_seq, (1, 0, 2))

        with torch.no_grad():
            tensor_data = torch.FloatTensor(X_seq).to(self.device)
            return torch.sigmoid(self.model(tensor_data)).item()

    @staticmethod
    def _extract_flow_stats(
        flow_packets: List[IP]
    ) -> Optional[Dict[str, Union[int, float]]]:
        """Extract per-flow statistical features.

        Returns:
            Feature dict, or None when the packet list is empty.
        """
        if not flow_packets:
            return None

        timestamps = [float(pkt.time) for pkt in flow_packets]
        # Clamp to a small positive floor: a flow whose packets all share
        # one timestamp would otherwise divide by zero in the rates below.
        flow_duration = max(float(timestamps[-1] - timestamps[0]), 0.001)

        # Direction is defined relative to the first packet's source IP.
        src_ip = flow_packets[0][IP].src
        forward_pkts = [pkt for pkt in flow_packets if pkt[IP].src == src_ip]
        backward_pkts = [pkt for pkt in flow_packets if pkt[IP].src != src_ip]

        def calc_iat(packets: List[IP]) -> float:
            """Mean inter-arrival time; 0.0 for fewer than two packets."""
            if len(packets) < 2:
                return 0.0
            return float(np.mean(np.diff([float(pkt.time) for pkt in packets])))

        def count_psh(packets: List[IP]) -> int:
            """Count TCP packets carrying the PSH flag."""
            return int(sum(
                1 for pkt in packets if TCP in pkt and pkt[TCP].flags.PSH
            ))

        def get_window(pkt: IP) -> int:
            """TCP window size of a packet; 0 for non-TCP."""
            if TCP in pkt:
                return int(pkt[TCP].window)
            return 0

        total_bytes = float(sum(len(pkt) for pkt in flow_packets))
        features = {
            'Flow Duration': flow_duration,
            'Total Fwd Packets': int(len(forward_pkts)),
            'Total Backward Packets': int(len(backward_pkts)),
            'Flow Bytes/s': total_bytes / flow_duration,
            'Flow Packets/s': float(len(flow_packets)) / flow_duration,
            'Fwd IAT Mean': calc_iat(forward_pkts),
            'Bwd IAT Mean': calc_iat(backward_pkts),
            'Fwd PSH Flags': count_psh(forward_pkts),
            'Bwd PSH Flags': count_psh(backward_pkts),
            'Init Fwd Win Bytes': (
                get_window(forward_pkts[0]) if forward_pkts else 0
            ),
            'Init Bwd Win Bytes': (
                get_window(backward_pkts[0]) if backward_pkts else 0
            )
        }
        return features

    def _preprocess_features(
        self,
        df: pd.DataFrame
    ) -> np.ndarray:
        """Select, clean and scale the model's required feature columns.

        Works on a reindexed copy so the caller's DataFrame is never
        mutated; features absent from `df` are filled with 0.0.

        Returns:
            Scaled float32 array of shape (n_rows, n_required_features).
        """
        # reindex() selects the required columns in model order and fills
        # any missing ones with 0.0 in a single, non-mutating step.
        df = df.reindex(columns=self.required_features, fill_value=0.0)
        df = df.apply(pd.to_numeric, errors='coerce')
        df = df.replace([np.inf, -np.inf], 0.0).fillna(0.0)

        return self.scaler.transform(df.values.astype(np.float32))

    @staticmethod
    def _log_attack_flow(
        flow_key: tuple,
        prediction: float,
        packet_count: int,
        start_time: float,
        end_time: float
    ) -> None:
        """Log one flow classified as attack traffic."""
        logger.warning(
            f"⚠️ 检测到攻击流量 [概率: {prediction:.2%}] "
            f"SRC: {flow_key[0]}:{flow_key[3]} → DST: {flow_key[1]}:{flow_key[4]} "
            f"协议: {flow_key[2]} | 包数: {packet_count} | "
            f"持续时间: {end_time-start_time:.8f}s"
        )

    @staticmethod
    def _log_normal_flow(
        flow_key: tuple,
        prediction: float,
        packet_count: int,
        start_time: float,
        end_time: float
    ) -> None:
        """Log one flow classified as normal traffic."""
        logger.info(
            f"⚪️ 预测正常流量 [概率: {prediction:.2%}] "
            f"SRC: {flow_key[0]}:{flow_key[3]} → DST: {flow_key[1]}:{flow_key[4]} "
            f"协议: {flow_key[2]} | 包数: {packet_count} | "
            f"持续时间: {end_time-start_time:.8f}s"
        )

    def process_pcap(
        self,
        pcap_path: str
    ) -> Dict[str, Union[pd.DataFrame, np.ndarray, float]]:
        """Run attack detection over every IP flow in a PCAP file.

        Writes per-flow predictions and features as CSVs into
        `self.output_dir` and logs a summary.

        Args:
            pcap_path: Path to the capture file.

        Returns:
            Dict with keys 'flow_predictions' (DataFrame), 'flow_features'
            (DataFrame), 'window_predictions' (ndarray) and
            'overall_attack_prob' (float).

        Raises:
            ValueError: If the PCAP cannot be read or yields no usable flows.
        """
        flows = defaultdict(list)
        try:
            packets = rdpcap(pcap_path)
            for pkt in packets:
                if not pkt.haslayer(IP):
                    continue
                if flow_key := self._generate_flow_key(pkt):
                    flows[flow_key].append(pkt)
        except Exception as ex:
            # Chain the original error so the root cause stays visible.
            raise ValueError(f"PCAP文件读取失败: {str(ex)}") from ex

        local_res = []
        features_list = []
        attack_count = 0

        for flow_key, pkts in flows.items():
            if features := self._extract_flow_stats(pkts):
                features_list.append(features)
                prediction = self._predict_single_flow(features)
                is_attack = prediction > self.attack_threshold

                if is_attack:
                    attack_count += 1
                    self._log_attack_flow(
                        flow_key=flow_key,
                        prediction=prediction,
                        packet_count=len(pkts),
                        start_time=pkts[0].time,
                        end_time=pkts[-1].time
                    )
                else:
                    self._log_normal_flow(
                        flow_key=flow_key,
                        prediction=prediction,
                        packet_count=len(pkts),
                        start_time=pkts[0].time,
                        end_time=pkts[-1].time
                    )

                local_res.append({
                    'src_ip': flow_key[0],
                    'dst_ip': flow_key[1],
                    'protocol': flow_key[2],
                    'src_port': flow_key[3] if len(flow_key) > 3 else 0,
                    'dst_port': flow_key[4] if len(flow_key) > 4 else 0,
                    'prediction': prediction,
                    'is_attack': is_attack,
                    'packet_count': len(pkts),
                    'flow_start': pkts[0].time,
                    'flow_end': pkts[-1].time
                })

        if not local_res:
            raise ValueError("未提取到有效流量特征")

        feature_df = pd.DataFrame(features_list)
        X = self._preprocess_features(feature_df)

        if len(X) < self.window_size:
            # Too few flows for one full window: zero-pad to a single window.
            padding = np.zeros((self.window_size - len(X), X.shape[1]))
            X_seq = np.concatenate([X, padding])[np.newaxis, :, :]
        else:
            # Every contiguous window of `window_size` flows.
            X_seq = np.lib.stride_tricks.sliding_window_view(
                X, (self.window_size, X.shape[1])
            ).reshape(-1, self.window_size, X.shape[1])

        with torch.no_grad():
            # .clone() materializes a contiguous tensor from the strided view.
            tensor_data = torch.from_numpy(X_seq).float().to(self.device).clone()
            window_preds = torch.sigmoid(self.model(tensor_data)).cpu().numpy()

        result_df = pd.DataFrame(local_res)
        result_df.to_csv(
            os.path.join(self.output_dir, "flow_predictions.csv"),
            index=False
        )
        feature_df.to_csv(
            os.path.join(self.output_dir, "flow_features.csv"),
            index=False
        )

        attack_ratio = result_df['is_attack'].mean()
        logger.info("检测结果统计:")
        logger.info(f"- 总流量数: {len(result_df)}")
        logger.info(f"- 攻击流量数: {attack_count}")
        logger.info(f"- 攻击流量占比: {attack_ratio:.2%}")
        logger.info(f"- 平均攻击概率: {result_df['prediction'].mean():.2%}")
        logger.info(f"- 窗口评估攻击概率: {np.mean(window_preds):.2%}")

        return {
            'flow_predictions': result_df,
            'flow_features': feature_df,
            'window_predictions': window_preds,
            'overall_attack_prob': float(np.mean(window_preds))
        }


if __name__ == "__main__":
    # Script entry point: run detection on a sample capture and report
    # any failure through the logger instead of a raw traceback.
    try:
        detector = PCAPProcessor(
            model_path=r"E:\C4\Ai\model\Model.model",
            window_size=30,
            output_dir="../detection_results",
            attack_threshold=0.7,
        )
        results = detector.process_pcap(r"E:\C4\Ai\Test.pcap")
    except Exception as e:
        logger.error(f"处理失败: {type(e).__name__}: {str(e)}")
