import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from tqdm import tqdm


def clean_labels(data, label_column):
    """Strip the trailing '.' that raw KDD labels carry (in place).

    Mutates *data* and returns it so calls can be chained.
    """
    stripped = data[label_column].str.rstrip(".")
    data[label_column] = stripped
    return data


# Input: the 10% sample of the raw KDD Cup 1999 data.
file_path = "./data/kddcup.data_10_percent"

# The 41 KDD'99 feature names followed by the target "label" column.
columns = (
    "duration protocol_type service flag src_bytes dst_bytes land "
    "wrong_fragment urgent hot num_failed_logins logged_in "
    "num_compromised root_shell su_attempted num_root "
    "num_file_creations num_shells num_access_files num_outbound_cmds "
    "is_host_login is_guest_login count srv_count serror_rate "
    "srv_serror_rate rerror_rate srv_rerror_rate same_srv_rate "
    "diff_srv_rate srv_diff_host_rate dst_host_count dst_host_srv_count "
    "dst_host_same_srv_rate dst_host_diff_srv_rate "
    "dst_host_same_src_port_rate dst_host_srv_diff_host_rate "
    "dst_host_serror_rate dst_host_srv_serror_rate dst_host_rerror_rate "
    "dst_host_srv_rerror_rate label"
).split()

print("正在读取数据...")
# Stream the CSV in 1000-row chunks so tqdm can show read progress,
# then stitch the chunks back into a single frame.
reader = pd.read_csv(file_path, names=columns, chunksize=1000)
pieces = [piece for piece in tqdm(reader, desc="正在读取数据...")]
data = pd.concat(pieces, axis=0, ignore_index=True)
total_steps = 7  # number of pipeline stages tracked by the progress bar
with tqdm(total=total_steps, desc="数据处理进度") as pbar:
    # Stage 1: strip the trailing '.' from the raw label strings.
    data = clean_labels(data, "label")
    pbar.update(1)

    # Map each raw KDD'99 attack label onto one of the four coarse
    # categories (Dos / Probe / R2L / U2R); anything unmapped — i.e. the
    # "normal" label — falls back to "normal" below.
    # FIX: the raw dataset spells these labels "ftp_write" and
    # "guess_passwd" (with underscores); the previous keys
    # "ftpwrite"/"guesspassword" never matched, so those attacks were
    # silently mislabelled as normal.
    attack_mapping = {
        "back": "Dos",
        "neptune": "Dos",
        "smurf": "Dos",
        "teardrop": "Dos",
        "land": "Dos",
        "pod": "Dos",
        "satan": "Probe",
        "portsweep": "Probe",
        "ipsweep": "Probe",
        "nmap": "Probe",
        "warezmaster": "R2L",
        "warezclient": "R2L",
        "ftp_write": "R2L",
        "guess_passwd": "R2L",
        "imap": "R2L",
        "multihop": "R2L",
        "phf": "R2L",
        "spy": "R2L",
        "rootkit": "U2R",
        "buffer_overflow": "U2R",
        "loadmodule": "U2R",
        "perl": "U2R",
    }

    # Stage 2: derive the simplified (4-category + normal) label column.
    data["label_simplified"] = data["label"].map(attack_mapping).fillna("normal")
    pbar.update(1)

    # Split features: numeric columns get scaled, text columns get encoded.
    numeric_features = data.select_dtypes(include=["int64", "float64"]).columns
    categorical_features = ["protocol_type", "service", "flag", "label"]

    # Stage 3: min-max scale every numeric feature into [0, 1].
    print("正在归一化数值特征...")
    scaler = MinMaxScaler()
    data[numeric_features] = scaler.fit_transform(data[numeric_features])
    pbar.update(1)

    # Stage 4: one-hot encode the categorical/text features.
    print("正在对文本特征进行 One-Hot 编码...")
    one_hot_encoder = OneHotEncoder(sparse_output=False, handle_unknown="ignore")
    categorical_encoded = one_hot_encoder.fit_transform(data[categorical_features])
    categorical_encoded_df = pd.DataFrame(
        categorical_encoded,
        columns=one_hot_encoder.get_feature_names_out(categorical_features),
    )
    # Prefix encoded column names with '*' so they are easy to tell apart
    # from the raw numeric columns.
    categorical_encoded_df.columns = [
        "*" + col for col in categorical_encoded_df.columns
    ]
    pbar.update(1)

    # Stage 5: merge the encoded text features in and drop the raw columns.
    data = pd.concat(
        [data.reset_index(drop=True), categorical_encoded_df.reset_index(drop=True)],
        axis=1,
    )
    data = data.drop(columns=categorical_features)
    pbar.update(1)

    # Stage 6: one-hot encode the simplified label and merge it in too.
    label_simplified_encoder = OneHotEncoder(
        sparse_output=False, handle_unknown="ignore"
    )
    label_simplified_encoded = label_simplified_encoder.fit_transform(
        data[["label_simplified"]]
    )
    label_simplified_encoded_df = pd.DataFrame(
        label_simplified_encoded,
        columns=label_simplified_encoder.get_feature_names_out(["label_simplified"]),
    )
    # Same '*' prefix convention as the other encoded columns.
    label_simplified_encoded_df.columns = [
        "*" + col for col in label_simplified_encoded_df.columns
    ]
    pbar.update(1)
    print("正在合并数据...")
    data = pd.concat(
        [
            data.reset_index(drop=True),
            label_simplified_encoded_df.reset_index(drop=True),
        ],
        axis=1,
    )
    data = data.drop(columns=["label_simplified"])
    pbar.update(1)

    # Reorder columns so each group of encoded columns sits where the raw
    # column used to be. NOTE: for col == "label" the "*label_" prefix also
    # matches the "*label_simplified_*" columns, so both the raw-label and
    # simplified-label encodings land at the label position — this matches
    # the original behavior (where a dedicated `elif col == "label"` branch
    # was unreachable because "label" is in `categorical_features`).
    new_columns = []
    for col in columns:
        if col in categorical_features:
            new_columns.extend(
                c for c in data.columns if c.startswith(f"*{col}_")
            )
        else:
            new_columns.append(col)
    data = data[new_columns]

    # Table 1: every row (normal traffic plus attacks).
    data_with_normal = data.copy()

    # Table 2: attack rows only — keep a row iff any attack-category
    # indicator column is set.
    attack_columns = [
        col for col in label_simplified_encoded_df.columns if "normal" not in col
    ]
    data_without_normal = data[data[attack_columns].sum(axis=1) > 0].copy()

    # Stage 7: write both tables to disk.
    print("正在保存处理后的数据...")
    with tqdm(total=2, desc="保存数据") as save_pbar:
        data_with_normal.to_csv("./data/data_with_normal.csv", index=False)
        save_pbar.update(1)
        data_without_normal.to_csv("./data/data_without_normal.csv", index=False)
        save_pbar.update(1)
        pbar.update(1)

    print("数据处理完成，所有文件已生成！")