# RandomForest.py
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix
import joblib
from util.LoadData import load_data
from charts.Draw import plot_metrics, plot_roc_curve, plot_confusion_matrix

# 预处理数据，包括处理缺失值和转换标签
def preprocess_data(df):
    # 检查 'Defective' 列是否存在
    if 'Defective' not in df.columns:
        raise ValueError("DataFrame does not contain 'Defective' column.")
    imputer = SimpleImputer(strategy='mean')  # 使用均值策略填充缺失值
    df_imputed = pd.DataFrame(imputer.fit_transform(df.select_dtypes(include=['number'])), columns=df.select_dtypes(include=['number']).columns)
    # 将 Defective 列中的标签从 'Y' 和 'N' 转换为 1 和 0
    df_imputed['Defective'] = df['Defective'].apply(lambda x: 1 if x == 'Y' else 0)
    return df_imputed

# Split the data into features/label, partition into train/test, standardize.
def split_data(df):
    """Return (X_train, X_test, y_train, y_test) with standardized features.

    The StandardScaler is fit on the TRAINING split only and then applied to
    the test split — fitting on the full data before splitting (as the
    original code did) leaks test-set statistics into training.

    NOTE(review): the fitted scaler is discarded here, so it cannot be reused
    at inference time alongside the saved model — consider persisting it too.
    """
    X = df.drop('Defective', axis=1)  # feature matrix
    y = df['Defective']  # binary label
    # Same random_state as before, so the row partition itself is unchanged.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)  # fit statistics on train only
    X_test = scaler.transform(X_test)        # apply the same transform to test
    return X_train, X_test, y_train, y_test

# Grid-search over forest size and depth, keeping the best test accuracy.
def tune_parameters(X_train, y_train, X_test, y_test):
    """Return (best_params, best_accuracy) over a small hyperparameter grid.

    NOTE(review): hyperparameters are selected on the held-out test set, so
    the reported accuracy is optimistically biased; a validation split or
    cross-validation would be cleaner. Behavior preserved as-is.
    """
    best_params, best_accuracy = {}, 0
    for trees in [10, 50, 100, 200]:  # candidate numbers of trees
        for depth in [None, 10, 20, 30]:  # candidate maximum depths
            clf = RandomForestClassifier(n_estimators=trees, max_depth=depth, random_state=42)
            clf.fit(X_train, y_train)
            acc = (clf.predict(X_test) == y_test).mean()
            if acc > best_accuracy:
                best_params = {'n_estimators': trees, 'max_depth': depth}
                best_accuracy = acc
    return best_params, best_accuracy

# Train a model with the best parameters and persist it to disk.
def train_and_save_model(X_train, y_train, params):
    """Fit a RandomForestClassifier with *params* and save it to
    model/best_random_forest_model.pkl, returning the fitted model.
    """
    from pathlib import Path  # local import: keeps file-level imports untouched
    model = RandomForestClassifier(**params, random_state=42)
    model.fit(X_train, y_train)
    # joblib.dump raises FileNotFoundError if the target directory is missing.
    Path('model').mkdir(parents=True, exist_ok=True)
    joblib.dump(model, 'model/best_random_forest_model.pkl')
    return model

# Main entry point: run the full load → preprocess → tune → train → report flow.
def main():
    """Load the KC1 dataset, tune and train a random forest, then write
    metric/ROC/confusion-matrix plots under picture/RF/.
    """
    from pathlib import Path  # local import: keeps file-level imports untouched

    file_path = r'D:\EDAG下载\NASADefectDataset-master\OriginalData\MDP\KC1.arff'
    df = load_data(file_path)  # load the raw ARFF data
    df_imputed = preprocess_data(df)  # impute + binarize label
    X_train, X_test, y_train, y_test = split_data(df_imputed)  # split + scale
    params, best_accuracy = tune_parameters(X_train, y_train, X_test, y_test)
    print(f"Best parameters: {params} with accuracy: {best_accuracy}")
    model = train_and_save_model(X_train, y_train, params)  # fit + persist
    y_pred = model.predict(X_test)  # hard predictions
    y_pred_prob = model.predict_proba(X_test)[:, 1]  # positive-class probabilities
    report = classification_report(y_test, y_pred, output_dict=True)
    conf_matrix = confusion_matrix(y_test, y_pred)

    # The plot helpers assume the output directory exists; create it up front
    # so a fresh checkout does not crash on the first savefig.
    Path('picture/RF').mkdir(parents=True, exist_ok=True)

    # Plot per-class evaluation metrics. classification_report with int labels
    # keys the per-class dicts by the stringified label ('0' / '1').
    metrics_0 = {
        'Accuracy': report['accuracy'],
        'Precision_0': report['0']['precision'],
        'Recall_0': report['0']['recall'],
        'F1_Score_0': report['0']['f1-score']
    }
    plot_metrics(metrics_0, 'Classification Metrics for Class 0 (No Defect)', 'picture/RF/classification_metrics_0.png', 'skyblue')

    metrics_1 = {
        'Accuracy': report['accuracy'],
        'Precision_1': report['1']['precision'],
        'Recall_1': report['1']['recall'],
        'F1_Score_1': report['1']['f1-score']
    }
    plot_metrics(metrics_1, 'Classification Metrics for Class 1 (Defect)', 'picture/RF/classification_metrics_1.png', 'lightgreen')

    # ROC curve with AUC
    plot_roc_curve(y_test, y_pred_prob, 'picture/RF/roc_curve_and_auc.png')

    # Confusion matrix heatmap
    plot_confusion_matrix(conf_matrix, 'picture/RF/confusion_matrix.png')

# 程序入口点
if __name__ == '__main__':
    main()