import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, precision_score, recall_score, accuracy_score
from sklearn.model_selection import train_test_split, cross_val_score
from imblearn.under_sampling import RandomUnderSampler
import joblib
from sklearn.preprocessing import LabelEncoder

def pre_process(path='/opt/arkime_ML/catch_es/out/web/time_data_18.csv.label.csv'):
    """Load the labelled CSV and clean it for model training.

    Drops identifier/metadata columns that must not be used as features,
    then replaces +/-inf and NaN with -1 so encoders and the forest can
    consume every remaining column.

    Args:
        path: CSV file to read. Defaults to the original hard-coded path,
            so existing callers (which pass no argument) are unaffected.

    Returns:
        pandas.DataFrame: cleaned feature table (still includes the
        'isDangerous' label column if present in the file).
    """
    print("正在读取文件...")
    df = pd.read_csv(path, engine='python')
    # Identifier / metadata columns that would leak or mislead the model.
    drop_cols = ['id', 'THREAT_TIME', 'SIP', 'S_PORT', 'DIP', 'D_PORT',
                 'XFF_IP', 'PROTOCOL', 'DENY_METHOD', 'THREAT_SUMMARY',
                 'SEVERITY']
    df = df.drop(columns=[col for col in drop_cols if col in df.columns])
    # Sentinel -1 for non-finite and missing values.
    df.replace([np.inf, -np.inf, np.nan], -1, inplace=True)
    return df

def label_encoder(df, le_dict=None):
    """Integer-encode every object (string) column of *df*.

    Args:
        df: DataFrame whose object-dtype columns are encoded in place.
        le_dict: optional {column: LabelEncoder} mapping saved from a
            previous run. When given, encoders are reused; labels never
            seen before are appended to the encoder's classes so
            transform() does not fail on new data.

    Returns:
        tuple: (df, le_dict) — the encoded DataFrame and the (possibly
        newly created) encoder mapping.
    """
    print("开始向量化")
    string_features = list(df.select_dtypes(include=['object']).columns)
    if le_dict is None:
        # Fresh run: fit a new encoder per string column.
        le_dict = {col: LabelEncoder() for col in string_features}
        for col in string_features:
            df[col] = le_dict[col].fit_transform(df[col].astype(str))
    else:
        for i, col in enumerate(string_features):
            print(f"向量化：{i + 1}/{len(string_features)}")
            if col in le_dict:
                unseen_labels = set(df[col].astype(str)) - set(le_dict[col].classes_)
                if unseen_labels:
                    # BUG FIX: iterate the unseen labels in sorted order.
                    # `list(set)` ordering is nondeterministic across runs,
                    # so the integer codes assigned to new labels varied
                    # between executions; sorting makes encoding stable.
                    le_dict[col].classes_ = np.append(le_dict[col].classes_,
                                                      sorted(unseen_labels))
                df[col] = le_dict[col].transform(df[col].astype(str))
            else:
                # Column did not exist last run: fit a brand-new encoder.
                le_dict[col] = LabelEncoder()
                df[col] = le_dict[col].fit_transform(df[col].astype(str))
    return df, le_dict

def under_sample(df):
    """Balance the two classes by random under-sampling.

    Splits *df* into features and the 'isDangerous' label, then resamples
    so both classes have equal counts (fixed seed for reproducibility).

    Returns:
        tuple: (resampled features, resampled label array).
    """
    print("开始欠采样")
    features = df.drop(columns=['isDangerous'])
    labels = df['isDangerous'].values
    sampler = RandomUnderSampler(random_state=42)
    return sampler.fit_resample(features, labels)

def training(x_train, y_train):
    """Fit and return a 100-tree random forest (fixed seed for reproducibility)."""
    model = RandomForestClassifier(n_estimators=100, random_state=0)
    model.fit(x_train, y_train)
    return model

def verify(random_forest, x_train, y_train):
    """Report 10-fold cross-validation scores for the model, if data exists.

    Prints the per-fold scores and their mean; prints a notice and does
    nothing when either the features or the labels are None.
    """
    # Guard clause: nothing to validate without data.
    if x_train is None or y_train is None:
        print("x_train或y_train为空，无法进行交叉验证")
        return
    print('进行10折交叉验证')
    scores = cross_val_score(random_forest, x_train, y_train, cv=10)
    print("交叉验证分数:", scores)
    print("交叉验证平均数:", scores.mean())

if __name__ == '__main__':
    pd.set_option('display.max_columns', None)
    pd.set_option('display.max_rows', None)
    x_train, y_train, x_test, y_test = None, None, None, None
    label_encoder_file = 'label_encoder.pkl'
    model_file = 'random_forest_model.pkl'
    feature_names_file = 'feature_names.pkl'
    le_dict = None
    random_forest = None
    feature_names = None
    try:
        # Load previous artifacts; all three must exist for a retrain run.
        random_forest = joblib.load(model_file)
        le_dict = joblib.load(label_encoder_file)
        feature_names = joblib.load(feature_names_file)
        print("模型和LabelEncoder加载成功，开始增量训练")
    except FileNotFoundError:
        print("未找到模型文件或LabelEncoder文件，重新训练模型")
    df = pre_process()
    df, le_dict = label_encoder(df, le_dict)
    X, Y = under_sample(df)
    x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=42)
    if random_forest is None:
        print("开始训练")
        random_forest = training(x_train, y_train)
        feature_names = list(x_train.columns)
    else:
        # NOTE(review): despite the log message, RandomForestClassifier has no
        # partial_fit — fit() here retrains from scratch on the new data only.
        print(f"开始增量训练：{random_forest}")
        # Extend the saved feature list with any columns new to this data.
        for feature in X.columns:
            if feature not in feature_names:
                feature_names.append(feature)
        # BUG FIX: the old `X[feature_names].fillna(-1)` raised KeyError when a
        # previously-saved feature was missing from the new data (fillna only
        # fixes NaN *values*, not absent columns). reindex adds missing columns
        # filled with the -1 sentinel and fixes the column order.
        X = X.reindex(columns=feature_names, fill_value=-1)
        # BUG FIX: align the held-out splits to the same columns/order so the
        # later cross-validation and predict(x_test) don't hit a feature-name
        # mismatch against the retrained model.
        x_train = x_train.reindex(columns=feature_names, fill_value=-1)
        x_test = x_test.reindex(columns=feature_names, fill_value=-1)
        random_forest.fit(X, Y)
    # Persist model, encoders and feature list for the next run.
    joblib.dump(random_forest, model_file)
    joblib.dump(le_dict, label_encoder_file)
    joblib.dump(feature_names, feature_names_file)
    verify(random_forest, x_train, y_train)
    if x_test is not None and y_test is not None:
        y_pred = random_forest.predict(x_test)
        print("[正预测为正，正预测为负]\n"
              "[负预测为正，负预测为负]")
        print(confusion_matrix(y_test, y_pred))
        print("精确率:", precision_score(y_test, y_pred))
        print("召回率:", recall_score(y_test, y_pred))
        print("准确率:", accuracy_score(y_test, y_pred))
    else:
        print("x_test或y_test为空，无法计算混淆矩阵")
