import numpy as np
import pandas as pd
import time
import os
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler, LabelEncoder
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
import sklearn  # 添加这行用于获取版本

"""绘制混淆矩阵"""
import matplotlib.pyplot as plt
from sklearn.metrics import ConfusionMatrixDisplay
# Print library version information (simplified)
print(f"numpy version: {np.__version__}")
print(f"pandas version: {pd.__version__}")
print(f"scikit-learn version: {sklearn.__version__}")  # uses the sklearn module imported above


# Merge rare classes during the preprocessing stage
def merge_rare_classes(df, min_samples=100):
    """Fold every label with fewer than *min_samples* rows into 'rare_attack'.

    Expects df['label'] to still hold string labels (call before any
    label encoding). Mutates *df* in place and returns it for chaining.
    """
    counts = df['label'].value_counts()
    infrequent = counts.index[counts < min_samples]
    rare_mask = df['label'].isin(infrequent)
    df.loc[rare_mask, 'label'] = 'rare_attack'
    return df

# 1. Data loading and preprocessing
def load_and_preprocess_data(input_path='./data/corrected'):
    """Load and preprocess the KDD Cup 99 dataset.

    Parameters
    ----------
    input_path : str, optional
        Path to the raw dataset file (41 feature columns plus a label
        column, no header row). Defaults to the original hard-coded
        location so existing callers are unaffected.

    Returns
    -------
    tuple
        (df, classes) — *df* with categorical columns and label
        integer-encoded; *classes* is the array of original string
        labels indexed by their encoded integer value.

    Raises
    ------
    FileNotFoundError
        If *input_path* does not exist.
    """
    if not os.path.exists(input_path):
        raise FileNotFoundError(f"数据集文件不存在: {input_path}")

    # Explicit column names for the 41 KDD features + label, so later
    # steps can address columns by name.
    column_names = [
        'duration', 'protocol_type', 'service', 'flag', 'src_bytes', 'dst_bytes', 'land',
        'wrong_fragment', 'urgent', 'hot', 'num_failed_logins', 'logged_in', 'num_compromised',
        'root_shell', 'su_attempted', 'num_root', 'num_file_creations', 'num_shells',
        'num_access_files', 'num_outbound_cmds', 'is_host_login', 'is_guest_login', 'count',
        'srv_count', 'serror_rate', 'srv_serror_rate', 'rerror_rate', 'srv_rerror_rate',
        'same_srv_rate', 'diff_srv_rate', 'srv_diff_host_rate', 'dst_host_count',
        'dst_host_srv_count', 'dst_host_same_srv_rate', 'dst_host_diff_srv_rate',
        'dst_host_same_src_port_rate', 'dst_host_srv_diff_host_rate', 'dst_host_serror_rate',
        'dst_host_srv_serror_rate', 'dst_host_rerror_rate', 'dst_host_srv_rerror_rate', 'label'
    ]

    # Read the data, warning (not failing) on malformed rows; fall back
    # to latin-1 if the file is not valid UTF-8.
    try:
        df = pd.read_csv(input_path, header=None, names=column_names, encoding='utf-8',
                         on_bad_lines='warn', low_memory=False)
    except UnicodeDecodeError:
        df = pd.read_csv(input_path, header=None, names=column_names, encoding='latin1',
                         on_bad_lines='warn', low_memory=False)

    print(f'原始数据集大小: {df.shape}')

    # 2. Integer-encode the categorical feature columns.
    categorical_cols = ['protocol_type', 'service', 'flag']
    for col in categorical_cols:
        le = LabelEncoder()
        df[col] = le.fit_transform(df[col])
        print(f"{col}类别数量: {len(le.classes_)}")

    # Optional step: merge rare classes while the labels are still
    # strings (deliberately disabled in the current experiment):
    # df = merge_rare_classes(df, min_samples=100)

    # 3. Encode the string labels as integers.
    label_encoder = LabelEncoder()
    df['label'] = label_encoder.fit_transform(df['label'])

    print(f"标签类别数量: {len(label_encoder.classes_)}")

    return df, label_encoder.classes_


# 3. Feature engineering
def feature_engineering(df):
    """Extract the feature matrix and label vector from *df*.

    Selects a fixed subset of 34 columns (note: an earlier comment said
    "10 features", but the list below actually contains 34), validates
    that they all exist, checks that the label column is already
    integer-encoded, and standardizes the features.

    Returns
    -------
    tuple
        (X, y) — standardized feature matrix and integer label vector.
    """
    # Fixed 34-column feature subset used by the model.
    selected_features = [
        'duration', 'protocol_type', 'service', 'flag',
        'src_bytes', 'dst_bytes', 'land', 'wrong_fragment',
        'urgent', 'hot', 'num_failed_logins', 'logged_in',
        'num_compromised', 'root_shell', 'su_attempted',
        'num_root', 'num_file_creations', 'num_shells',
        'num_access_files', 'count', 'srv_count', 'serror_rate',
        'srv_serror_rate', 'rerror_rate', 'srv_rerror_rate',
        'same_srv_rate', 'diff_srv_rate', 'dst_host_count',
        'dst_host_srv_count', 'dst_host_same_srv_rate',
        'dst_host_diff_srv_rate', 'dst_host_same_src_port_rate',
        'dst_host_serror_rate', 'dst_host_srv_serror_rate'
    ]

    # Fail fast if the frame is missing any expected column.
    missing_features = [name for name in selected_features if name not in df.columns]
    if missing_features:
        raise ValueError(f"数据集缺少以下特征: {missing_features}")

    # Extract features and labels as numpy arrays.
    X = df[selected_features].values
    y = df['label'].values
    # Labels must already be integer-encoded by load_and_preprocess_data().
    if not np.issubdtype(y.dtype, np.integer):
        raise TypeError(f"标签包含非整数值: {np.unique(y)}")
    print(f'特征矩阵大小: {X.shape}')
    print(f'标签向量大小: {y.shape}')

    # 4. Standardize features to zero mean / unit variance.
    # (MinMaxScaler would be the alternative if [0, 1] scaling is wanted.)
    standardizer = StandardScaler()
    X = standardizer.fit_transform(X)

    return X, y


# 5. Model training and evaluation
def train_and_evaluate(X, y, class_names):
    """Train a decision tree classifier on (X, y) and report its performance.

    Parameters
    ----------
    X : np.ndarray
        Feature matrix, one row per connection record.
    y : np.ndarray
        Integer-encoded labels aligned with X.
    class_names : sequence of str
        Original label names indexed by encoded value (used by the
        classification report and the tree plot).

    Returns
    -------
    DecisionTreeClassifier
        The fitted model.
    """
    # Display names for the feature-importance report and tree plot.
    # NOTE(review): this hard-coded 10-name list predates the 34-column
    # selection done in feature_engineering(), so it usually no longer
    # matches X. Fall back to generic names when the count differs —
    # otherwise analyze_feature_importance() raises an IndexError.
    selected_features = [
        'flag', 'src_bytes', 'dst_bytes', 'wrong_fragment',
        'hot', 'num_failed_logins', 'num_root',
        'srv_count', 'dst_host_count', 'dst_host_srv_count'
    ]
    if len(selected_features) != X.shape[1]:
        selected_features = [f'feature_{i}' for i in range(X.shape[1])]

    # Stratified split preserves the (highly imbalanced) class distribution.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.4, random_state=42, stratify=y
    )

    print(f'训练集特征大小: {X_train.shape}, 训练集标签大小: {y_train.shape}')
    print(f'测试集特征大小: {X_test.shape}, 测试集标签大小: {y_test.shape}')
    print(f"训练集类别分布: {np.bincount(y_train)}")
    print(f"测试集类别分布: {np.bincount(y_test)}")

    # Build and train the decision tree.
    print('\n开始训练决策树模型...')
    start_time = time.time()

    dt = DecisionTreeClassifier(
        criterion='gini',
        splitter='best',
        max_depth=20,            # cap depth to limit overfitting
        min_samples_split=2,
        min_samples_leaf=1,
        random_state=42,
        class_weight='balanced'  # compensate for class imbalance
    )

    dt.fit(X_train, y_train)

    train_time = time.time() - start_time
    print(f'训练完成! 耗时: {train_time:.2f}秒')

    # Predict on the held-out test set.
    print('\n开始预测...')
    start_time = time.time()
    y_pred = dt.predict(X_test)
    predict_time = time.time() - start_time
    print(f'预测完成! 耗时: {predict_time:.2f}秒')

    # Performance metrics; zero_division=0 silences undefined-metric
    # warnings for classes that never get predicted.
    print('\n===== 模型性能评估 =====')
    print(f'准确率: {metrics.accuracy_score(y_test, y_pred):.4f}')

    print(f'宏平均精确率: {metrics.precision_score(y_test, y_pred, average="macro", zero_division=0):.4f}')
    print(f'微平均精确率: {metrics.precision_score(y_test, y_pred, average="micro", zero_division=0):.4f}')
    print(f'宏平均召回率: {metrics.recall_score(y_test, y_pred, average="macro", zero_division=0):.4f}')
    print(f'加权平均F1-score: {metrics.f1_score(y_test, y_pred, average="weighted", zero_division=0):.4f}')

    # Confusion matrix.
    print('\n混淆矩阵:')
    conf_matrix = metrics.confusion_matrix(y_test, y_pred)
    print(conf_matrix)

    # Per-class report. NOTE(review): target_names assumes every encoded
    # class appears in y_test/y_pred — holds when the split keeps all
    # classes; verify if rare-class merging is re-enabled upstream.
    print('\n分类报告:')
    print(metrics.classification_report(
        y_test, y_pred,
        target_names=class_names,
        zero_division=0,
        digits=4
    ))

    analyze_feature_importance(dt, selected_features)
    visualize_decision_tree(dt, selected_features, class_names)

    return dt


# Feature-importance analysis, run after model evaluation
def analyze_feature_importance(model, feature_names):
    """Print and plot the model's feature importances, highest first.

    Parameters
    ----------
    model : fitted estimator exposing ``feature_importances_``
    feature_names : sequence of str
        Display names for the model's input columns.

    Only indices that *feature_names* can actually label are reported,
    so a length mismatch between the name list and the model's feature
    count (see the hard-coded list in train_and_evaluate) can no longer
    raise an IndexError — the original looped over len(feature_names)
    and indexed feature_names[indices[f]] unguarded.
    """
    importances = model.feature_importances_
    indices = np.argsort(importances)[::-1]

    # Keep only indices we have a name for, in descending-importance order.
    shown = [i for i in indices if i < len(feature_names)]

    print("\n===== 特征重要性排序 =====")
    for i in shown:
        print(f"{feature_names[i]:>25}: {importances[i]:.4f}")

    # Bar chart of the same ranking.
    plt.figure(figsize=(12, 6))
    plt.title("Feature Importances")
    plt.bar(range(len(shown)), importances[shown], align='center')
    plt.xticks(range(len(shown)), [feature_names[i] for i in shown], rotation=90)
    plt.tight_layout()
    plt.show()


# Decision tree visualization
def visualize_decision_tree(model, feature_names, class_names):
    """Plot the first three levels of a fitted decision tree."""
    from sklearn.tree import plot_tree

    plt.figure(figsize=(25, 15))
    # Restrict rendering depth so the figure stays legible.
    plot_tree(
        model,
        feature_names=feature_names,
        class_names=class_names,
        filled=True,
        rounded=True,
        max_depth=3,
        fontsize=10
    )
    plt.title("决策树结构 (前3层)")
    plt.show()


if __name__ == '__main__':
    # Pipeline: load/preprocess -> feature engineering -> train & evaluate.
    df, class_names = load_and_preprocess_data()
    X, y = feature_engineering(df)
    model = train_and_evaluate(X, y, class_names)