import os

import joblib  # used to persist the trained model
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.io import arff
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.metrics import classification_report, confusion_matrix, roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Load the .arff dataset (NASA MDP KC1 software-defect data).
# NOTE(review): hard-coded absolute Windows path — consider making this configurable.
file_path = r'D:\EDAG下载\NASADefectDataset-master\OriginalData\MDP\KC1.arff'
data, meta = arff.loadarff(file_path)

# Convert the ARFF record array into a pandas DataFrame.
df = pd.DataFrame(data)

# scipy.io.arff loads nominal attributes as bytes; decode them to str.
# Column-wise decode of object columns instead of the deprecated
# DataFrame.applymap (removed in favour of DataFrame.map in pandas 2.1+).
for col in df.columns:
    if df[col].dtype == object:
        df[col] = df[col].apply(lambda x: x.decode() if isinstance(x, bytes) else x)

# Impute missing values in the numeric feature columns with the column mean.
numeric_cols = df.select_dtypes(include=['number']).columns
imputer = SimpleImputer(strategy='mean')
df_imputed = pd.DataFrame(imputer.fit_transform(df[numeric_cols]), columns=numeric_cols)

# Encode the label column: 'Y' (defective) -> 1, anything else -> 0.
df_imputed['Defective'] = df['Defective'].apply(lambda x: 1 if x == 'Y' else 0)

# Split into feature matrix X and label vector y.
X = df_imputed.drop('Defective', axis=1)
y = df_imputed['Defective']

# Standardize features to zero mean / unit variance.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Hold out 30% of the data for the final test set.
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.3, random_state=42)

# Hyperparameter search over a small random-forest grid.
# BUG FIX: the original selected hyperparameters by accuracy on the TEST
# set, leaking test information into model selection and biasing the final
# evaluation. Select on a validation split carved out of the training data
# instead, so the test set is only touched for the final, unbiased report.
X_tr, X_val, y_tr, y_val = train_test_split(
    X_train, y_train, test_size=0.25, random_state=42
)

best_accuracy = 0
best_params = {}
for n_estimators in [10, 50, 100, 200]:   # number of trees to try
    for max_depth in [None, 10, 20, 30]:  # maximum tree depths to try
        candidate = RandomForestClassifier(
            n_estimators=n_estimators, max_depth=max_depth, random_state=42
        )
        candidate.fit(X_tr, y_tr)
        accuracy = (candidate.predict(X_val) == y_val).mean()
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            best_params = {'n_estimators': n_estimators, 'max_depth': max_depth}

print(f"Best parameters: {best_params} with accuracy: {best_accuracy}")

# Retrain on the full training set with the selected hyperparameters.
model = RandomForestClassifier(**best_params, random_state=42)
model.fit(X_train, y_train)

# Persist the trained model; create the target directory first so
# joblib.dump does not fail with FileNotFoundError on a fresh checkout.
os.makedirs('../model', exist_ok=True)
joblib.dump(model, '../model/best_random_forest_model.pkl')

# Predict hard labels and defect-class (label 1) probabilities on the test set.
y_pred = model.predict(X_test)
y_pred_prob = model.predict_proba(X_test)[:, 1]

# Evaluate: per-class precision/recall/F1 as a dict, plus the confusion matrix.
report = classification_report(y_test, y_pred, output_dict=True)
conf_matrix = confusion_matrix(y_test, y_pred)
print(report)
print(conf_matrix)

# Bar colors for the two classes.
color_0 = 'skyblue'
color_1 = 'lightgreen'

# Ensure the figure output directory exists before saving.
os.makedirs('picture/RF', exist_ok=True)

# Bar chart of the evaluation metrics for class 0 (no defect).
metrics_0 = {
    'Accuracy': report['accuracy'],
    'Precision_0': report['0']['precision'],
    'Recall_0': report['0']['recall'],
    'F1_Score_0': report['0']['f1-score']
}

plt.figure(figsize=(6, 6))
plt.bar(metrics_0.keys(), metrics_0.values(), color=color_0)
plt.title('Classification Metrics for Class 0 (No Defect)')
plt.ylabel('Score')
plt.tight_layout()
# BUG FIX: original filename had a stray leading 'p'
# ('pclassification_metrics_0.png'), inconsistent with the class-1 figure.
plt.savefig('picture/RF/classification_metrics_0.png')
plt.show()

# Bar chart of the evaluation metrics for class 1 (defect).
metrics_1 = {
    'Accuracy': report['accuracy'],
    'Precision_1': report['1']['precision'],
    'Recall_1': report['1']['recall'],
    'F1_Score_1': report['1']['f1-score']
}

os.makedirs('picture/RF', exist_ok=True)  # ensure the output directory exists

plt.figure(figsize=(6, 6))
plt.bar(metrics_1.keys(), metrics_1.values(), color=color_1)
plt.title('Classification Metrics for Class 1 (Defect)')
plt.ylabel('Score')
plt.tight_layout()
plt.savefig('picture/RF/classification_metrics_1.png')
plt.show()

# ROC curve and AUC computed from the defect-class probabilities.
fpr, tpr, _ = roc_curve(y_test, y_pred_prob)
roc_auc = auc(fpr, tpr)

os.makedirs('picture/RF', exist_ok=True)  # ensure the output directory exists

# ROC curve figure.
plt.figure(figsize=(8, 6))
plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')  # chance diagonal
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC)')
plt.legend(loc='lower right')
plt.tight_layout()
plt.savefig('picture/RF/roc_curve_and_auc.png')
plt.show()

# Confusion-matrix heatmap (rows = actual, columns = predicted).
plt.figure(figsize=(8, 6))
sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues',
            xticklabels=['No Defect', 'Defect'],
            yticklabels=['No Defect', 'Defect'])
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.title('Confusion Matrix')
plt.savefig('picture/RF/confusion_matrix.png')
plt.show()