# Import packages
import joblib
import pandas as pd
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.feature_selection import f_classif, chi2
from tqdm import tqdm
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn.model_selection import cross_val_predict, GridSearchCV
from sklearn.metrics import confusion_matrix
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
from copy import deepcopy
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_curve, auc
from scipy import interp
from sklearn.metrics import precision_recall_curve, average_precision_score


# Artificial-intelligence precision-medicine training model
class AIpmt:
    """End-to-end helper for binary-classification biomarker studies.

    Bundles feature standardization, univariate feature scoring (ANOVA / chi2),
    incremental feature selection (IFS), model training with optional grid
    search (LR / RBF-SVM), cross-validated evaluation, and a set of
    publication-style plots (density, cluster, correlation, IFS curves,
    confusion matrix, ROC, PRC).

    Parameters
    ----------
    X : pd.DataFrame
        Feature matrix, one row per sample.
    y : pd.Series
        Binary class labels aligned with ``X`` (positive class encoded as 1).
    """

    def __init__(self, X, y):
        self.X = X
        self.y = y
        # Univariate feature-scoring functions (name -> sklearn scorer).
        # NOTE(review): chi2 requires non-negative features, but fit()
        # standardizes X first (producing negative values) — confirm intent
        # before calling fit(fs_method="Chi2").
        self.feature_importance = {
            "ANOVA": f_classif,
            "Chi2": chi2
            }
        # Candidate estimators; probability=True so the SVM supports
        # predict_proba (needed by predict/plot_roc/plot_prc).
        self.train_model_list = {
            "LR": LogisticRegression(),
            "SVM": svm.SVC(kernel='rbf', probability=True)
            }
        # Hyper-parameter grids for GridSearchCV.
        # 'solver': ['liblinear'] is pinned because the default lbfgs solver
        # does not support the 'l1' penalty and would make the search fail.
        self.param_grid = {
            "LR": {'C': [0.001, 0.01, 0.1, 1, 10],
                   'penalty': ['l1', 'l2'],
                   'solver': ['liblinear']},
            "SVM": {'C': [2 ** i for i in range(-5, 16, 2)],
                    'gamma': [2 ** i for i in range(-15, 4, 2)]}
            }

    def fit(self, fs_method="ANOVA", ifs_method=None, ifs_grid=False, ifs_cv=5):
        """Standardize features, rank them by univariate score and optionally
        run incremental feature selection (IFS).

        Parameters
        ----------
        fs_method : str
            Key into ``self.feature_importance`` ("ANOVA" or "Chi2").
        ifs_method : str or None
            Key into ``self.train_model_list``; when given, IFS evaluates the
            top-i ranked features for every i and stores the metrics in
            ``self.ifs_results``.
        ifs_grid : bool
            Whether IFS models are tuned with GridSearchCV.
        ifs_cv : int
            Number of cross-validation folds used during IFS.

        Returns
        -------
        self
        """
        # Standardize features (z-score, fitted on the whole of X).
        scaler = StandardScaler()
        self.X = pd.DataFrame(scaler.fit_transform(self.X), columns=self.X.columns)
        # Per-feature Pearson correlation with the label.
        self.correlations = self.X.corrwith(self.y)
        # Univariate importance scores ([0] keeps the statistic, drops p-values).
        self.fs_scores = self.feature_importance[fs_method](self.X, self.y)[0]
        # Reorder columns by descending importance.
        ascending = self.fs_scores.argsort()
        self.X_sorted = self.X.iloc[:, ascending[::-1]]
        # Incremental feature selection: train/evaluate on the top-i features
        # for every prefix length i.
        if ifs_method:
            results = []
            for i in tqdm(range(1, len(self.X_sorted.columns) + 1)):
                selected_features = self.X_sorted.iloc[:, :i]
                model = self.train(selected_features, self.y, grid=ifs_grid, method=ifs_method, cv=ifs_cv)
                cv_result = self.cv_test(selected_features, self.y, model, cv=ifs_cv)
                results.append({
                    'Num Features': i,
                    'ACC': cv_result['ACC'],
                    'SN': cv_result['SN'],
                    'SP': cv_result['SP'],
                    'MCC': cv_result['MCC'],
                    'F1': cv_result['F1']
                })
            self.ifs_results = pd.DataFrame(results)
        return self

    def transform(self, X, evaluate="F1"):
        """Return ``X`` restricted to the feature subset that maximized the
        ``evaluate`` metric during IFS (all ranked features if IFS never ran).

        NOTE(review): the scaler is re-fitted on the incoming X instead of
        reusing the statistics learned in fit() — potential train/test
        leakage or skew; confirm this is intended.
        """
        scaler = StandardScaler()
        X = pd.DataFrame(scaler.fit_transform(X), columns=X.columns)
        try:
            best_n = self.ifs_results['Num Features'][self.ifs_results[evaluate].idxmax()]
            return X[self.X_sorted.iloc[:, :best_n].columns]
        except (AttributeError, KeyError):
            # No IFS results (or unknown metric): fall back to every ranked feature.
            return X[self.X_sorted.columns]

    def train(self, X, y, grid=False, method='LR', cv=5):
        """Fit a fresh copy of the requested estimator on (X, y).

        With ``grid=True`` the returned object is the fitted GridSearchCV
        wrapper (same predict/predict_proba interface); the plain pre-fit is
        skipped in that case because the search refits internally anyway.
        """
        model = deepcopy(self.train_model_list[method])
        if grid:
            search = GridSearchCV(model, self.param_grid[method], cv=cv,
                                  scoring="accuracy", return_train_score=False, n_jobs=1)
            return search.fit(X, y)
        model.fit(X, y)
        return model

    def cv_test(self, X, y, model, cv=5):
        """Cross-validate ``model`` and return rounded metrics plus the
        confusion matrix under key ``'cm'``."""
        y_pred = cross_val_predict(model, X, y, cv=cv)
        cm, ACC, SN, SP, MCC, F1 = self.calculate_metrics(y, y_pred)
        return {'ACC': round(ACC, 3),
                'SN': round(SN, 3),
                'SP': round(SP, 3),
                'MCC': round(MCC, 3),
                'F1': round(F1, 3),
                'cm': cm
            }

    def predict(self, X, model, threshoud=0.5):
        """Predict labels by thresholding the positive-class probability.

        ``threshoud`` (sic — name kept for backward compatibility) is the
        probability cut-off above which class 1 is predicted.

        Returns
        -------
        (y_pred, y_pred_proba) : (np.ndarray of 0/1, np.ndarray of shape (n, 2))
        """
        y_pred_proba = model.predict_proba(X)
        y_pred = (y_pred_proba[:, 1] > threshoud).astype(int)
        return y_pred, y_pred_proba

    def calculate_metrics(self, y, y_pred):
        """Compute the confusion matrix and ACC/SN/SP/MCC/F1 for binary labels.

        All ratios are guarded against zero denominators (returning 0) so a
        degenerate fold cannot produce inf/nan.
        """
        tn, fp, fn, tp = confusion_matrix(y, y_pred).ravel()
        # sorted() gives a deterministic label order (set order is not guaranteed).
        labels = sorted(set([*y, *y_pred]))
        cm = pd.DataFrame([[tn, fp], [fn, tp]], columns=labels, index=labels)
        total = tp + tn + fp + fn
        # Accuracy
        accuracy = (tp + tn) / total if total != 0 else 0
        # Sensitivity (recall / true positive rate)
        sensitivity = tp / (tp + fn) if (tp + fn) != 0 else 0
        # Specificity (true negative rate)
        specificity = tn / (tn + fp) if (tn + fp) != 0 else 0
        # Matthews correlation coefficient
        mcc_denominator = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
        mcc = (tp * tn - fp * fn) / mcc_denominator if mcc_denominator != 0 else 0
        # F1 score
        precision = tp / (tp + fp) if (tp + fp) != 0 else 0
        recall = sensitivity
        f1_score = 2 * (precision * recall) / (precision + recall) if (precision + recall) != 0 else 0
        return cm, accuracy, sensitivity, specificity, mcc, f1_score

    def plot_density(self, X, y, cols=7, title="Density Histogram of Each Feature", out="Feature Density.png"):
        """Plot one per-class density curve per feature on a rows x cols grid
        and save to ``out`` (skipped when ``out`` is falsy)."""
        rows = (len(X.columns) + cols - 1) // cols  # ceiling division
        fig, axs = plt.subplots(rows, cols, figsize=(cols * 3, rows * 2.7))
        fig.suptitle(title, fontsize=16)
        # plt.subplots returns a scalar Axes or a 1-D array when rows or cols
        # is 1; normalize to a 2-D grid so [row, col] indexing always works.
        axs = np.asarray(axs).reshape(rows, cols)
        for i, feature in enumerate(X.columns):
            ax = axs[i // cols, i % cols]
            for label_class in y.unique():
                data_class = X[y == label_class]
                data_class[feature].plot(kind='density', ax=ax, title=feature, label=f"Class {label_class}")
            ax.set_xlabel('')
            ax.set_ylabel('Density')
            ax.legend()
        # Remove the unused trailing subplots.
        for i in range(len(X.columns), rows * cols):
            fig.delaxes(axs.flatten()[i])
        plt.tight_layout()
        if out:
            plt.savefig(out, dpi=300, bbox_inches='tight')

    def plot_cluster(self, X, y, cluster='PCA', title="Feature Dimensionality Reduction Cluster", out="Feature Cluster.png"):
        """Reduce X to 2-D with PCA or t-SNE and scatter-plot it colored by label.

        Raises
        ------
        ValueError
            If ``cluster`` is neither 'PCA' nor 'TSNE'.
        """
        if cluster == 'PCA':
            reduced_features = PCA(n_components=2).fit_transform(X)
        elif cluster == 'TSNE':
            reduced_features = TSNE(n_components=2, random_state=42).fit_transform(X)
        else:
            raise ValueError(f"Unsupported cluster method: {cluster!r} (use 'PCA' or 'TSNE')")
        data_with_labels = pd.DataFrame(reduced_features, columns=[f'{cluster}1', f'{cluster}2'])
        # Use positional values so a non-default index on y cannot misalign
        # against the fresh RangeIndex of the reduced DataFrame.
        data_with_labels['label'] = np.asarray(y)
        plt.figure(figsize=(5, 5))
        for label, group in data_with_labels.groupby('label'):
            plt.scatter(group[f'{cluster}1'], group[f'{cluster}2'], label=f'Cluster {int(label)}',
                        alpha=0.7, color='#FE8011' if label > 0 else '#2279B5')
        plt.title(title)
        plt.legend()
        plt.xlabel(f'{cluster} Component 1')
        plt.ylabel(f'{cluster} Component 2')
        if out:
            plt.savefig(out, dpi=300, bbox_inches='tight')

    def plot_correlation(self, X, y, title="Feature Correlations with Label", out="Feature Correlations.png"):
        """Bar-plot the correlation of every feature with the label
        (positive bars orange, negative blue)."""
        correlations = X.corrwith(y)
        plt.figure(figsize=(14, 6))
        plt.bar(correlations.index, correlations,
                color=['#FE8011' if corr > 0 else '#2279B5' for corr in correlations])
        plt.xticks(rotation=45, ha='right')
        plt.title(title)
        plt.ylabel('Correlation')
        if out:
            plt.savefig(out, dpi=300, bbox_inches='tight')

    def plot_lines(self, results_df, title="Performance Metrics vs. Number of Features", out="Feature Selection.png"):
        """Plot the IFS metric curves (ACC/SN/SP/MCC/F1) against the number of
        selected features; ``results_df`` is the frame built by fit()."""
        plt.figure(figsize=(10, 6))
        for column, label in (('ACC', 'Accuracy'), ('SN', 'Sensitivity'),
                              ('SP', 'Specificity'), ('MCC', 'MCC'), ('F1', 'F1 Score')):
            plt.plot(results_df['Num Features'], results_df[column], label=label)
        plt.legend(loc='lower right')
        plt.xlabel('Number of Features')
        plt.ylabel('Metrics Value')
        plt.title(title)
        plt.grid(True)
        if out:
            plt.savefig(out, dpi=300, bbox_inches='tight')

    def plot_cm(self, cm, out='Confusion_Matrix.png'):
        """Render a confusion-matrix DataFrame as an annotated heat map."""
        num_classes = cm.shape[0]
        plt.figure(figsize=(4, 4))
        plt.imshow(cm, interpolation='nearest',
                   cmap=LinearSegmentedColormap.from_list('custom_cmap', ['#FFFFFF', '#2486B9', '#005E91'], N=256))
        plt.colorbar()
        plt.xlabel('Predicted Labels')
        plt.ylabel('True Labels')
        tick_marks = np.arange(num_classes)
        plt.xticks(tick_marks, range(num_classes), fontsize=10)
        plt.yticks(tick_marks, range(num_classes), fontsize=10)
        # Overlay the raw count in each cell.
        for i in range(num_classes):
            for j in range(num_classes):
                plt.text(j, i, str(cm.iloc[i, j]), ha='center', va='center', color='black', fontsize=12)
        if out:
            plt.savefig(out, dpi=300, bbox_inches='tight')

    def plot_roc(self, X, y, model, cv=5, title="5-Fold Cross-Validated ROC Curve", out="ROC Curve.png"):
        """Plot per-fold ROC curves plus the mean curve with a ±1-std band,
        using stratified cross-validation on a fresh copy of ``model``."""
        model = deepcopy(model)
        cv = StratifiedKFold(n_splits=cv, shuffle=True, random_state=42)
        # TPR values of every fold, interpolated onto a common FPR grid.
        interp_tprs = []
        mean_fpr = np.linspace(0, 1, 50)
        colors = ['b', 'g', 'r', 'c', 'm']
        plt.figure(figsize=(8, 6))
        for i, (train_index, test_index) in enumerate(cv.split(X, y)):
            X_train, X_test = X.iloc[train_index], X.iloc[test_index]
            y_train, y_test = y.iloc[train_index], y.iloc[test_index]
            model.fit(X_train, y_train)
            y_pred_prob = model.predict_proba(X_test)[:, 1]
            fpr, tpr, _ = roc_curve(y_test, y_pred_prob)
            # np.interp replaces the removed scipy.interp (the top-of-file
            # `from scipy import interp` is no longer used here).
            interp_tprs.append(np.interp(mean_fpr, fpr, tpr))
            # Modulo keeps color indexing valid for cv > 5 folds.
            plt.plot(fpr, tpr, color=colors[i % len(colors)], alpha=0.2,
                     label=f'Fold {i+1} (AUC = {round(auc(fpr, tpr), 2)})')
        # Mean ROC curve and its standard deviation across folds.
        mean_tpr = np.mean(interp_tprs, axis=0)
        std_tpr = np.std(interp_tprs, axis=0)
        roc_auc = auc(mean_fpr, mean_tpr)
        plt.plot(mean_fpr, mean_tpr, color='black', label='Mean ROC (AUC = {:.2f})'.format(roc_auc))
        plt.fill_between(mean_fpr, mean_tpr - std_tpr, mean_tpr + std_tpr,
                         color='gray', alpha=0.3, label='± 1 std. dev.')
        plt.plot([0, 1], [0, 1], color='gray', linestyle='--')  # chance diagonal
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title(title)
        plt.legend(loc='lower right')
        plt.grid(True)
        if out:
            plt.savefig(out, dpi=300, bbox_inches='tight')

    def plot_prc(self, X, y, model, cv=5, title="5-Fold Cross-Validated PRC Curve", out="PRC Curve.png"):
        """Plot per-fold precision-recall curves plus a mean curve, using
        stratified cross-validation on a fresh copy of ``model``."""
        model = deepcopy(model)
        cv = StratifiedKFold(n_splits=cv, shuffle=True, random_state=42)
        # Recall values of every fold, interpolated onto a common precision grid.
        interp_recalls = []
        mean_precision = np.linspace(0, 1, 50)
        colors = ['b', 'g', 'r', 'c', 'm']
        plt.figure(figsize=(8, 6))
        for i, (train_index, test_index) in enumerate(cv.split(X, y)):
            X_train, X_test = X.iloc[train_index], X.iloc[test_index]
            y_train, y_test = y.iloc[train_index], y.iloc[test_index]
            model.fit(X_train, y_train)
            y_pred_prob = model.predict_proba(X_test)[:, 1]
            precision, recall, _ = precision_recall_curve(y_test, y_pred_prob)
            # np.interp replaces the removed scipy.interp.
            interp_recalls.append(np.interp(mean_precision, precision, recall))
            plt.plot(recall, precision, color=colors[i % len(colors)], alpha=0.6,
                     label=f'Fold {i+1} (AUC = {round(average_precision_score(y_test, y_pred_prob), 2)})')
        mean_recall = np.mean(interp_recalls, axis=0)
        # NOTE(review): the model here is the one fitted on the LAST fold but
        # scored on all of (X, y) — confirm this summary statistic is intended.
        avg_precision = average_precision_score(y, model.predict_proba(X)[:, 1])
        plt.plot(mean_recall, mean_precision, color='black',
                 label='Mean PRC (Avg. Precision = {:.2f})'.format(avg_precision))
        plt.plot([0, 1], [1, 0], color='gray', linestyle='--')
        plt.xlabel('Recall')
        plt.ylabel('Precision')
        plt.title(title)
        plt.legend(loc='lower right')
        plt.grid(True)
        if out:
            plt.savefig(out, dpi=300, bbox_inches='tight')