from sklearn.cross_decomposition import PLSRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix,recall_score,classification_report,accuracy_score
import os
import numpy as np
import pandas as pd
from scipy import signal
from keras.utils import np_utils
import matplotlib.pyplot as plt
import seaborn as sns
from joblib import dump, load
import pickle
from data_loader import DataLoader
from Data_Preprocessing import SpectraPreprocessor

class model_machine:
    """Namespace of classical models (PLS-DA, SVC, random forest, decision
    tree, KNN) for spectra classification.

    All methods are static; the class is used as a namespace. Unless stated
    otherwise, label arrays are expected to be one-hot encoded.
    """

    @staticmethod
    def kfold_pls(x, y, nsplits, pls_components, random_state=None):
        """Search the optimal PLS component count with k-fold cross-validation.

        Args:
            x: spectra matrix, shape (n_samples, n_features).
            y: targets (one-hot labels or regression values).
            nsplits: number of CV folds.
            pls_components: candidate component counts 1..pls_components.
            random_state: seed for the KFold shuffle.

        Returns:
            Tuple ``(best_index, errors)``: ``best_index`` is the 0-based
            index of the lowest mean CV error (optimal component count is
            ``best_index + 1``); ``errors`` is the per-count mean MSE array.
        """
        cval = KFold(n_splits=nsplits, shuffle=True, random_state=random_state)
        # Mean cross-validated MSE for each candidate component count.
        # (Renamed from `re`, which shadowed the stdlib regex module name.)
        errors = np.zeros(pls_components).astype('float32')
        for comp in range(pls_components):
            fold_mse = []
            # Cross-validation over the folds for this component count.
            for train, test in cval.split(x):
                y_pred = model_machine.base_pls(x[train, :], y[train], x[test, :], comp + 1)
                # NOTE: mean_squared_error yields MSE, not RMSE (no square
                # root is taken), despite the original comments.
                fold_mse.append(mean_squared_error(y[test], y_pred))
            # Average error over the splits for this component count.
            errors[comp] = np.mean(np.array(fold_mse))
            print('pls_components是', comp, '误差为', errors[comp])
        # Index of the smallest mean error; optimal count = index + 1.
        best_index = np.argmin(errors)
        return best_index, errors

    @staticmethod
    def base_pls(x_train, y_train, x_test, n_components):
        """Fit a PLS regression on the train split and predict the test split.

        Returns:
            Predicted target array for ``x_test``.
        """
        pls = PLSRegression(n_components=n_components)
        pls.fit(x_train, y_train)
        return pls.predict(x_test)

    @staticmethod
    def train_pls_model(pls, x_train, y_train, result_dir=None):
        """Fit the given PLS model and optionally pickle it to ``result_dir``.

        Args:
            pls: an (unfitted) PLSRegression instance.
            x_train, y_train: training data.
            result_dir: directory to save 'pls_model.pkl' into; skipped if None.

        Returns:
            Tuple ``(pls, history)``. NOTE: sklearn's ``fit()`` returns the
            estimator itself, so ``history`` is the same fitted object as
            ``pls`` (kept for backward compatibility with callers).
        """
        history = pls.fit(x_train, y_train)
        if result_dir is not None:
            model_file_path = os.path.join(result_dir, 'pls_model.pkl')
            with open(model_file_path, 'wb') as f:
                pickle.dump(pls, f)
            print(f'Model saved to {model_file_path}')
        else:
            print('Model not saved.')
        return pls, history

    @staticmethod
    def predict_pls_model(x_test, pls=None, result_dir=None):
        """Predict with a fitted PLS model, loading it from disk if needed.

        Args:
            x_test: samples to predict.
            pls: fitted model; if None, load 'pls_model.pkl' from result_dir.
            result_dir: directory holding the pickled model.

        Returns:
            Prediction array for ``x_test``.

        Raises:
            ValueError: if neither a model nor a directory is supplied.
        """
        if pls is None:
            if result_dir is None:
                raise ValueError("Either pls model or result_dir must be provided.")
            model_file_path = os.path.join(result_dir, 'pls_model.pkl')
            # NOTE: unpickling is only safe for files this program wrote
            # itself; never load a pickle from an untrusted source.
            with open(model_file_path, 'rb') as f:
                pls = pickle.load(f)
            print('Model loaded successfully.')

        return pls.predict(x_test)

    @staticmethod
    def evaluate_model(y_true, y_pred, result_dir):
        """Evaluate one-hot predictions: confusion matrix, accuracy, recall, report.

        Plots a confusion-matrix heatmap and, when ``result_dir`` is given,
        saves the figure and a text summary there.

        Args:
            y_true: one-hot ground-truth labels, shape (n_samples, n_classes).
            y_pred: predicted scores/one-hot rows, same shape.
            result_dir: output directory, or None to skip saving.

        Returns:
            Dict with keys 'confusion_matrix', 'accuracy', 'recall',
            'classification_report'.
        """
        # Collapse one-hot / score rows to class indices.
        y_pred_classes = np.argmax(y_pred, axis=1)
        y_true_classes = np.argmax(y_true, axis=1)

        conf_matrix = confusion_matrix(y_true_classes, y_pred_classes)
        accuracy = accuracy_score(y_true_classes, y_pred_classes)
        recall = recall_score(y_true_classes, y_pred_classes, average='weighted')
        class_report = classification_report(y_true_classes, y_pred_classes)

        plt.figure(figsize=(8, 6))
        sns.heatmap(conf_matrix, annot=True, fmt="d", cmap="Blues")
        plt.title("Confusion Matrix")
        plt.xlabel("Predicted Label")
        plt.ylabel("True Label")
        # Only save when a directory was supplied; the original called
        # os.path.join(None, ...) here and crashed for result_dir=None.
        if result_dir is not None:
            plt.savefig(os.path.join(result_dir, 'model_confusion_matrix.png'))
        plt.show()

        # Print the evaluation results.
        print("混淆矩阵:\n", conf_matrix)
        print("准确率: {:.2f}%".format(accuracy * 100))
        print("召回率: {:.2f}%".format(recall * 100))
        print("分类报告:\n", class_report)

        # Persist the evaluation results to a text file.
        if result_dir is not None:
            with open(os.path.join(result_dir, 'pls_evaluation_results.txt'), 'w') as f:
                f.write("混淆矩阵:\n")
                f.write(np.array2string(conf_matrix))
                f.write("\n准确率: {:.2f}%\n".format(accuracy * 100))
                f.write("召回率: {:.2f}%\n".format(recall * 100))
                f.write("分类报告:\n")
                f.write(class_report)

        return {
            'confusion_matrix': conf_matrix,
            'accuracy': accuracy,
            'recall': recall,
            'classification_report': class_report
        }

    @staticmethod
    def svc(x_train, x_test, y_train, y_test):
        """Train and evaluate a linear-kernel SVC.

        One-hot labels are collapsed to class indices before fitting.

        Returns:
            Predicted class indices for ``x_test``.
        """
        from sklearn.svm import SVC
        # Convert one-hot y_train / y_test to single class labels.
        y_train_single = np.argmax(y_train, axis=1)
        y_test_single = np.argmax(y_test, axis=1)
        svm_clf = SVC(kernel='linear', random_state=42)  # other kernels: 'rbf', 'poly', ...
        svm_clf.fit(x_train, y_train_single)
        # Predict on the test split.
        svm_predictions = svm_clf.predict(x_test)
        # Accuracy and confusion matrix.
        svm_accuracy = accuracy_score(y_test_single, svm_predictions)
        print(f"SVC模型的准确率: {svm_accuracy * 100:.2f}%")

        conf_matrix_svc = confusion_matrix(y_test_single, svm_predictions)
        class_report_svc = classification_report(y_test_single, svm_predictions)
        print("svc混淆矩阵:\n", conf_matrix_svc)
        print("svc分类报告:\n", class_report_svc)
        return svm_predictions

    @staticmethod
    def rfc(x_train, x_test, y_train, y_test):
        """Train and evaluate a random forest on one-hot labels.

        The forest is fitted in multilabel mode on the one-hot targets; the
        printed accuracy is sklearn's subset accuracy over the one-hot rows.

        Returns:
            Predicted class indices for ``x_test``.
        """
        from sklearn.ensemble import RandomForestClassifier
        rf_clf = RandomForestClassifier(random_state=42)
        rf_clf.fit(x_train, y_train)
        rf_predictions = rf_clf.predict(x_test)
        # Subset accuracy on the one-hot (multilabel) predictions.
        rf_accuracy = accuracy_score(y_test, rf_predictions)
        print(f"rfc模型的准确率: {rf_accuracy * 100:.2f}%")

        # Collapse to class indices for the confusion matrix / report.
        rf_predictions = np.argmax(rf_predictions, axis=1)
        y_test_single = np.argmax(y_test, axis=1)
        conf_matrix_rfc = confusion_matrix(y_test_single, rf_predictions)
        class_report_rfc = classification_report(y_test_single, rf_predictions)
        print("rfc混淆矩阵:\n", conf_matrix_rfc)
        print("rfc分类报告:\n", class_report_rfc)

        return rf_predictions

    @staticmethod
    def dtc(x_train, x_test, y_train, y_test):
        """Train and evaluate a depth-limited decision tree on one-hot labels.

        Returns:
            Raw (one-hot) predictions for ``x_test`` (NOTE: unlike ``rfc``,
            this returns the one-hot rows, not class indices — preserved for
            backward compatibility).
        """
        from sklearn.tree import DecisionTreeClassifier
        tree_clf = DecisionTreeClassifier(
            random_state=44,
            criterion='gini',      # 'gini' or 'entropy'
            max_depth=8,           # cap the tree depth
            min_samples_split=5,   # min samples required to split an internal node
            min_samples_leaf=4     # min samples required at a leaf node
        )
        tree_clf.fit(x_train, y_train)
        tree_predictions = tree_clf.predict(x_test)
        # Subset accuracy on the one-hot (multilabel) predictions.
        tree_accuracy = accuracy_score(y_test, tree_predictions)
        print(f"决策树模型的准确率: {tree_accuracy * 100:.2f}%")

        # Collapse to class indices (without rebinding the y_test parameter).
        tree_predict = np.argmax(tree_predictions, axis=1)
        y_test_single = np.argmax(y_test, axis=1)
        conf_matrix_tree = confusion_matrix(y_test_single, tree_predict)
        class_report_tree = classification_report(y_test_single, tree_predict)
        print("dtc混淆矩阵:\n", conf_matrix_tree)
        print("dtc分类报告:\n", class_report_tree)

        return tree_predictions

    @staticmethod
    def knnc(x_train, x_test, y_train, y_test):
        """Train and evaluate a 5-nearest-neighbors classifier on one-hot labels.

        Returns:
            Raw (one-hot) predictions for ``x_test``.
        """
        from sklearn.neighbors import KNeighborsClassifier
        knn = KNeighborsClassifier(n_neighbors=5)  # neighbor count is tunable
        # Fit the KNN model.
        knn.fit(x_train, y_train)
        # Predict on the test split.
        knn_y_pred = knn.predict(x_test)
        # Subset accuracy on the one-hot (multilabel) predictions.
        knn_accuracy = accuracy_score(y_test, knn_y_pred)
        print(f"KNN 模型的准确率: {knn_accuracy * 100:.2f}%")

        knn_predict = np.argmax(knn_y_pred, axis=1)
        y_test_single = np.argmax(y_test, axis=1)
        conf_matrix_knn = confusion_matrix(y_test_single, knn_predict)
        class_report_knn = classification_report(y_test_single, knn_predict)
        print("knnc混淆矩阵:\n", conf_matrix_knn)
        print("knnc分类报告:\n", class_report_knn)

        return knn_y_pred


if __name__ == '__main__':
    # Test: blended-fabric spectra classification.
    data = DataLoader()
    pre = SpectraPreprocessor()
    # Load the fabric spectra dataset.
    spectra, label, wave = data.load_fabric_spectra()
    # Preprocess: SNV standardization followed by Savitzky-Golay smoothing.
    x1 = pre.snv(spectra)
    x = pre.sg(x1)
    pre.plot_spectra(wave, x, title="Spectra", xlabel="Wavelength (nm)")
    # 3-class problem: one-hot encode the labels.
    num_classe = 3
    y3 = np_utils.to_categorical(label, num_classes=num_classe)
    # Output directory for the fabric results.
    result_dir = 'result_fzp'
    os.makedirs(result_dir, exist_ok=True)
    # Split into training and test sets.
    x_train, x_test, y_train, y_test = train_test_split(x, y3, test_size=0.2, random_state=43)

    # Modeling / analysis.
    model = model_machine()
    # PLS-DA: 5-fold CV search for the optimal number of PLS components.
    # Search on the TRAINING split only, so the held-out test set does not
    # leak into model selection (the original searched the full dataset).
    comp, acc = model.kfold_pls(x_train, y_train, 5, 30, random_state=11)
    print('PLS最优主成分数是', comp + 1, '误差为', acc[comp])
    # Define the PLS model with the selected component count.
    pls = PLSRegression(comp + 1)
    # Train the model (also pickles it into result_dir).
    pls, history = model.train_pls_model(pls, x_train, y_train, result_dir)
    # Predict: pls=None forces a reload of the model saved in result_dir.
    Y_pred = model.predict_pls_model(x_test, pls=None, result_dir=result_dir)
    # Evaluate the PLS-DA model.
    results = model.evaluate_model(y_test, Y_pred, result_dir)

    # SVC baseline.
    re_svc = model.svc(x_train, x_test, y_train, y_test)

    # Random forest baseline (RandomForestClassifier).
    re_rfc = model.rfc(x_train, x_test, y_train, y_test)

    # Decision tree baseline.
    re_dtc = model.dtc(x_train, x_test, y_train, y_test)

    # KNN baseline.
    re_knn = model.knnc(x_train, x_test, y_train, y_test)
