import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
from imblearn.over_sampling import RandomOverSampler
from utils import draw
from scipy.io import arff
import numpy as np
from sklearn.feature_selection import VarianceThreshold
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from mlp_model import MLP


# Read a CSV dataset.
def data_handle(filename):
    """Read a CSV dataset and split it into feature rows and binary labels.

    Every column except the last is treated as a feature; the last column
    is the class label.  The label string "b'clean'" maps to 0 (no defect),
    anything else maps to 1 (defect).

    Args:
        filename: path to the CSV file.

    Returns:
        (list_datasets, category_labels): a list of per-sample feature
        lists, and a parallel list of 0/1 integer labels.
    """
    read_data = pd.read_csv(filename)
    # Vectorized column split instead of per-cell iloc access, which is
    # O(rows * cols) Python-level lookups and very slow on large files.
    list_datasets = read_data.iloc[:, :-1].values.tolist()
    # NOTE: labels arrive as the literal string "b'clean'" (an ARFF byte
    # label serialized into CSV), hence the quoted comparison.
    category_labels = [0 if label == "b'clean'" else 1
                       for label in read_data.iloc[:, -1]]
    return list_datasets, category_labels

# Read an ARFF dataset.
def data_handle_arff(filename):
    """Read an ARFF dataset and split it into features and binary labels.

    Every attribute except the last is treated as a feature; the last
    attribute is the class label.  The byte label b'clean' maps to 0
    (no defect), anything else maps to 1 (defect).

    Args:
        filename: path to the ARFF file.

    Returns:
        (datasets, category_labels): a numpy array of feature rows, and a
        list of 0/1 integer labels.
    """
    data, meta = arff.loadarff(filename)
    frame = pd.DataFrame(data)
    # Feature matrix: all columns but the last, as a numpy array.
    datasets = np.array(frame.iloc[:, :-1])
    # loadarff yields nominal values as bytes, hence the b'clean' compare.
    category_labels = [0 if label == b'clean' else 1
                       for label in frame.iloc[:, -1]]
    return datasets, category_labels

# ---- Predict on a new dataset ----
# Load the evaluation features/labels (CSV variant by default; swap the
# commented line to evaluate the ARFF dataset instead).
new_features, new_labels = data_handle('feature/PDE.csv')
# new_features,new_labels = data_handle_arff('feature/JDT.csv')

new_features = torch.Tensor(new_features)
input_dim = new_features.shape[1]


# Restore the trained model weights and switch to inference mode.
model = MLP(input_dim)
model.load_state_dict(torch.load('model_mlp.pt'))
model.eval()

# new_features is already a Tensor (converted above), so alias it rather
# than paying for a second torch.Tensor(...) copy.
new_inputs = new_features
# Column vector of float labels, matching the model's (N, 1) output shape.
new_labels = torch.Tensor(new_labels).unsqueeze(1)

# Drop near-constant features, then project to 2-D with PCA.  This is used
# only for the scatter-plot visualization, not for the model's prediction.
selected_features = VarianceThreshold(threshold=0.01).fit_transform(new_inputs)
pca_features = PCA(n_components=2).fit_transform(selected_features)
print(pca_features)

# Indices of samples the model misclassifies (highlighted in the plot).
incorrect_samples = []


with torch.no_grad():
    predictions = model(new_inputs)

    # Threshold the model's probabilities at 0.5 to get hard 0/1 labels.
    predict_labels = (predictions.squeeze(1).numpy() > 0.5).astype(int)
    new_labels = new_labels.squeeze(1).numpy()

    # NOTE: this is accuracy_score, not an AUC, despite what the original
    # variable name suggested.
    accuracy = metrics.accuracy_score(new_labels, predict_labels)
    macro = metrics.precision_score(new_labels, predict_labels, average='macro')
    micro = metrics.precision_score(new_labels, predict_labels, average='micro')
    macro_recall = metrics.recall_score(new_labels, predict_labels, average='macro')
    weighted = metrics.f1_score(new_labels, predict_labels, average='weighted')
    print('准确率:', accuracy)  # accuracy
    print('宏平均精确率:', macro)  # macro-averaged precision
    print('微平均精确率:', micro)  # micro-averaged precision
    print('宏平均召回率:', macro_recall)  # macro-averaged recall
    print('平均F1-score:', weighted)  # weighted F1-score
    print('混淆矩阵输出:\n', metrics.confusion_matrix(new_labels, predict_labels))  # confusion matrix
    print('分类报告:', metrics.classification_report(new_labels, predict_labels))  # classification report
    draw.plot_roc(new_labels, predict_labels, accuracy, macro, macro_recall, weighted)  # ROC curve / AUC plot

    # Reuse the already-thresholded predict_labels instead of re-deriving
    # each label from the raw tensor output.
    for i, predicted_label in enumerate(predict_labels):
        if predicted_label != int(new_labels[i]):
            incorrect_samples.append(i)

# Scatter plot of the 2-D PCA projection, colored by true label, with
# misclassified samples overdrawn in red.
plt.scatter(pca_features[:, 0], pca_features[:, 1], c=new_labels)
plt.scatter(pca_features[incorrect_samples, 0], pca_features[incorrect_samples, 1], c='r', label='Incorrect')
plt.legend()
plt.show()
