import numpy as np
from imblearn.over_sampling import RandomOverSampler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, accuracy_score, recall_score, precision_score, f1_score, confusion_matrix
from sklearn.preprocessing import  LabelEncoder
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.preprocessing import StandardScaler, OneHotEncoder, KBinsDiscretizer
from sklearn.compose import ColumnTransformer
from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, LSTM, Input
from tensorflow.keras.models import Sequential, Model as KerasModel
from tensorflow.keras.callbacks import EarlyStopping, Callback
from hmmlearn import hmm
from scipy.stats import mode
import asyncio
from PyQt5.QtCore import QObject, pyqtSignal, QTimer
from PyQt5.QtWidgets import QApplication
import tensorflow as tf

class ModelProgressCallback(Callback):
    """Keras callback that streams per-epoch training metrics through a Qt signal.

    After every epoch it emits a dict carrying the model name, 1-based epoch
    number, the latest loss/accuracy, and the full accumulated history.
    """

    def __init__(self, progress_signal, model_name):
        """
        Args:
            progress_signal: Qt signal (pyqtSignal(dict)) used to publish progress.
            model_name: Display name of the model being trained.
        """
        super().__init__()
        self.progress_signal = progress_signal
        self.model_name = model_name
        # Accumulated per-epoch metrics; re-emitted in full every epoch so the
        # GUI can redraw complete curves without keeping its own state.
        self.history = {'loss': [], 'accuracy': []}

    @staticmethod
    def determine_severity(text):
        """Score incident severity by summing the weights of matched keywords.

        Fix: this used to be a nested function defined inside __init__ that was
        never referenced (dead code); it is now exposed as a static method so
        callers can actually use it. Unmatched text gets a 0.1 floor
        (treated as a minor incident by default).
        """
        severity_keywords = {
            '车辆抛锚': 0.5,
            '车辆交通事故': 1,
            '轻微碰撞': 0.3,
            '严重碰撞': 1.5,
            '人员受伤': 2,
            '交通堵塞': 0.8,
            '道路封闭': 2,
            '无人员伤亡': 0.2,
            '轻微刮擦': 0.1,
            '无交通事故': 0
        }
        severity = sum(value for keyword, value in severity_keywords.items()
                       if keyword in text)
        if severity == 0:
            severity = 0.1  # default: minor incident
        return severity

    def on_epoch_end(self, epoch, logs=None):
        """Record this epoch's metrics and emit a progress snapshot."""
        if logs is not None:
            self.history['loss'].append(logs.get('loss', 0))
            self.history['accuracy'].append(logs.get('accuracy', 0))
            self.progress_signal.emit({
                'model': self.model_name,
                'epoch': epoch + 1,  # report 1-based epochs to the UI
                'loss': logs.get('loss', 0),
                'accuracy': logs.get('accuracy', 0),
                'history': self.history
            })

class ModelComparison(QObject):
    progress_signal = pyqtSignal(dict)
    model_completed_signal = pyqtSignal(dict)
    training_completed_signal = pyqtSignal(dict)

    def __init__(self):
        """Initialize result storage and move this QObject to the GUI thread.

        Qt signals should be emitted from an object living in the main thread;
        moveToThread ensures that even if the instance is created elsewhere.
        """
        super().__init__()
        self.results = []  # accumulated per-model result dicts
        app = QApplication.instance()
        # Guard: QApplication.instance() returns None before any application
        # object exists; the original code raised AttributeError in that case.
        if app is not None:
            self.moveToThread(app.thread())

    def setup_model_signals(self, progress_callback, completion_callback, final_callback):
        """Defer signal wiring to the next event-loop turn so it runs on the Qt thread."""
        def wire():
            self._connect_signals(progress_callback, completion_callback, final_callback)

        QTimer.singleShot(0, wire)

    def _connect_signals(self, progress_callback, completion_callback, final_callback):
        """Connect each of the three signals to its corresponding handler."""
        wiring = (
            (self.progress_signal, progress_callback),
            (self.model_completed_signal, completion_callback),
            (self.training_completed_signal, final_callback),
        )
        for signal, handler in wiring:
            signal.connect(handler)

    def prepare_features_and_labels(self, data):
        """Build the feature preprocessor and select model input columns.

        Numeric columns are standard-scaled; the road-number column is
        one-hot encoded (unknown categories ignored at transform time).

        Returns:
            (preprocessor, features): an unfitted ColumnTransformer and the
            slice of *data* holding the model input columns.
        """
        numeric_cols = ['SPEED', 'FINT_LANEVOLUME', 'FINT_SECTVOLUME']
        categorical_cols = ["FSTR_ROADNUMBER"]

        preprocessor = ColumnTransformer(
            transformers=[
                ('num', Pipeline(steps=[('scaler', StandardScaler())]), numeric_cols),
                ('cat', Pipeline(steps=[('onehot', OneHotEncoder(handle_unknown='ignore'))]), categorical_cols),
            ])

        return preprocessor, data[numeric_cols + categorical_cols]

    async def run_model(self, model_name, model_func, X_train, X_test, y_train, y_test, preprocessor, **kwargs):
        """Announce start, run one model coroutine, then record and broadcast its results."""
        self.progress_signal.emit({'model': model_name, 'status': 'start'})
        outcome = await model_func(X_train, X_test, y_train, y_test, preprocessor, **kwargs)
        self.results.append(outcome)
        self.model_completed_signal.emit(outcome)
        return outcome

    async def run_linear_regression(self, X_train, X_test, y_train, y_test, preprocessor):
        """Fit a preprocessing + LinearRegression pipeline and report test MSE.

        Classification metrics are reported as None because this model is a
        regressor; only 'mse' is meaningful here.
        """
        pipeline = Pipeline(steps=[
            ('preprocessor', preprocessor),
            ('regressor', LinearRegression()),
        ])
        pipeline.fit(X_train, y_train)
        predictions = pipeline.predict(X_test)

        return {
            'model': '线性回归',
            'mse': mean_squared_error(y_test, predictions),
            'accuracy': None,
            'recall': None,
            'precision': None,
            'f1': None,
        }

    async def run_logistic_regression(self, X_train, X_test, y_train, y_test, preprocessor):
        """Discretize the continuous target into 5 uniform bins, then fit and
        score a preprocessing + LogisticRegression pipeline on those bins.

        The discretizer is fitted on the training target only; test labels are
        mapped through the same bin edges.
        """
        binner = KBinsDiscretizer(n_bins=5, encode='ordinal', strategy='uniform')
        train_bins = binner.fit_transform(y_train.reshape(-1, 1)).flatten()
        test_bins = binner.transform(y_test.reshape(-1, 1)).flatten()

        pipeline = Pipeline(steps=[
            ('preprocessor', preprocessor),
            ('classifier', LogisticRegression(max_iter=1000)),
        ])
        pipeline.fit(X_train, train_bins)
        predicted = pipeline.predict(X_test)

        return {
            'model': '逻辑回归',
            'mse': mean_squared_error(test_bins, predicted),
            'accuracy': accuracy_score(test_bins, predicted),
            'recall': recall_score(test_bins, predicted, average='weighted'),
            'precision': precision_score(test_bins, predicted, average='weighted'),
            'f1': f1_score(test_bins, predicted, average='weighted'),
            'confusion_matrix': confusion_matrix(test_bins, predicted),
        }

    async def run_cnn(self, X_train, X_test, y_train, y_test, preprocessor, n_timesteps=30):
        """Train a 1-D CNN for binary classification on windowed features.

        The preprocessed row matrix is zero-padded to a multiple of
        n_timesteps and reshaped to (windows, n_timesteps, n_features).
        NOTE(review): labels are then truncated to the window count, which
        pairs each window with an early *row* label rather than a per-window
        label — confirm this alignment is intended.

        Returns a dict with test metrics, training history, train/test
        predicted probabilities, and penultimate-layer features.
        """
        # Fit the preprocessor on training data only. .toarray() assumes a
        # sparse matrix (OneHotEncoder output) — TODO confirm it is always sparse.
        X_train_preprocessed = preprocessor.fit_transform(X_train).toarray()
        X_test_preprocessed = preprocessor.transform(X_test).toarray()
        n_features = X_train_preprocessed.shape[1]

        def pad_or_truncate(data, n_timesteps):
            # Despite the name, this only zero-pads rows until the row count is
            # a multiple of n_timesteps; it never truncates.
            if data.shape[0] % n_timesteps != 0:
                padding_size = n_timesteps - (data.shape[0] % n_timesteps)
                data = np.pad(data, ((0, padding_size), (0, 0)), mode='constant')
            return data

        X_train_preprocessed = pad_or_truncate(X_train_preprocessed, n_timesteps)
        X_test_preprocessed = pad_or_truncate(X_test_preprocessed, n_timesteps)

        # Group consecutive rows into non-overlapping windows of n_timesteps.
        X_train_preprocessed = X_train_preprocessed.reshape(-1, n_timesteps, n_features)
        X_test_preprocessed = X_test_preprocessed.reshape(-1, n_timesteps, n_features)

        # Align label vectors with the number of windows (see NOTE in docstring).
        y_train = y_train[:X_train_preprocessed.shape[0]]
        y_test = y_test[:X_test_preprocessed.shape[0]]

        model = Sequential([
            Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps, n_features)),
            MaxPooling1D(pool_size=2),
            Conv1D(filters=128, kernel_size=3, activation='relu'),
            MaxPooling1D(pool_size=2),
            Flatten(),
            Dense(128, activation='relu'),
            Dense(64, activation='relu'),
            Dense(1, activation='sigmoid')
        ])
        # Ensure the model is built so it has a defined input for feature extraction
        model.build(input_shape=(None, n_timesteps, n_features))

        model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

        # Stream per-epoch metrics to the GUI through the Qt signal.
        callback = ModelProgressCallback(self.progress_signal, 'CNN')
        history = model.fit(X_train_preprocessed, y_train, 
                          epochs=5, batch_size=32, verbose=1,
                          callbacks=[callback])

        # Predicted probabilities for the training and test sets.
        y_prob_train = model.predict(X_train_preprocessed).flatten()
        y_prob_test = model.predict(X_test_preprocessed).flatten()
        # Intermediate feature extraction via the functional API: reuse every
        # trained layer except the final sigmoid head, so the extractor yields
        # the penultimate Dense(64) activations as "spatial" features.
        input_tensor = Input(shape=(n_timesteps, n_features))
        x = input_tensor
        for layer in model.layers[:-1]:
            x = layer(x)
        extractor = KerasModel(inputs=input_tensor, outputs=x)
        feats_train = extractor.predict(X_train_preprocessed)
        feats_test  = extractor.predict(X_test_preprocessed)

        # Binarize test probabilities at the 0.5 threshold.
        y_pred = (y_prob_test > 0.5).astype('int32').flatten()
        
        results = {
            'model': 'CNN',
            'mse': mean_squared_error(y_test, y_pred),
            'accuracy': accuracy_score(y_test, y_pred),
            'recall': recall_score(y_test, y_pred, average='weighted'),
            'precision': precision_score(y_test, y_pred, average='weighted'),
            'f1': f1_score(y_test, y_pred, average='weighted'),
            'confusion_matrix': confusion_matrix(y_test, y_pred),
            'history': history.history,
            'y_prob_train': y_prob_train,
            'y_prob_test': y_prob_test,
            'feats_train': feats_train,
            'feats_test': feats_test
        }
        return results

    async def run_hmm(self, X_train, X_test, y_train, y_test, preprocessor, n_timesteps=None):
        """Fit a 3-state Gaussian HMM and classify by mapping each hidden state
        to the most common training label observed in that state.

        Args:
            n_timesteps: Unused; kept so every run_* method shares a signature.

        Returns a dict with test metrics plus the mapped labels as placeholder
        "probabilities" for train and test.
        """
        # Fit the preprocessor on training data only, then apply to both splits.
        X_train_preprocessed = preprocessor.fit_transform(X_train).toarray()
        X_test_preprocessed = preprocessor.transform(X_test).toarray()

        # Train the HMM (unsupervised) on the preprocessed training features.
        hmm_model = hmm.GaussianHMM(n_components=3, covariance_type="full", n_iter=200)
        hmm_model.fit(X_train_preprocessed)

        # Decode hidden-state sequences for both splits.
        hidden_states_train = hmm_model.predict(X_train_preprocessed)
        hidden_states_test = hmm_model.predict(X_test_preprocessed)

        # Map each hidden state to the label it most often co-occurs with.
        state_to_label = {}
        for state in np.unique(hidden_states_train):
            labels_state = y_train[hidden_states_train == state]
            res_mode = mode(labels_state)
            # scipy >= 1.9 returns scalars from mode(); older versions return arrays.
            most_common_label = res_mode.mode.item() if hasattr(res_mode.mode, 'item') else res_mode.mode[0]
            state_to_label[state] = most_common_label

        # Mapped labels double as placeholder "probabilities" and as predictions.
        y_prob_train = np.array([state_to_label[s] for s in hidden_states_train])
        y_prob_test = np.array([state_to_label[s] for s in hidden_states_test])
        y_pred = y_prob_test

        # Fixes vs. original: removed the unused `y_hmm = y_train` assignment
        # and the duplicate metric computations (each metric was calculated
        # twice — once into throwaway locals, once into the dict).
        results = {
            'model': 'HMM',
            'mse': mean_squared_error(y_test, y_pred),
            'accuracy': accuracy_score(y_test, y_pred),
            'recall': recall_score(y_test, y_pred, average='weighted'),
            'precision': precision_score(y_test, y_pred, average='weighted'),
            'f1': f1_score(y_test, y_pred, average='weighted'),
            'confusion_matrix': confusion_matrix(y_test, y_pred),
            'y_prob_train': y_prob_train,
            'y_prob_test': y_prob_test
        }
        return results

    async def run_lstm(self, X_train, X_test, y_train, y_test, preprocessor, n_timesteps=30):
        """Train a 2-layer LSTM for binary classification on windowed features.

        Rows are zero-padded to a multiple of n_timesteps and reshaped to
        (windows, n_timesteps, n_features).
        NOTE(review): labels are truncated to the window count, pairing each
        window with an early *row* label — confirm this alignment is intended.

        Returns a dict with test metrics, training history, train/test
        predicted probabilities, and second-LSTM-layer features.
        """
        # Fit the preprocessor on training data only. .toarray() assumes a
        # sparse matrix (OneHotEncoder output) — TODO confirm it is always sparse.
        X_train_preprocessed = preprocessor.fit_transform(X_train).toarray()
        X_test_preprocessed = preprocessor.transform(X_test).toarray()
        n_features = X_train_preprocessed.shape[1]

        def pad_or_truncate(data, n_timesteps):
            # Despite the name, this only zero-pads rows until the row count is
            # a multiple of n_timesteps; it never truncates.
            if data.shape[0] % n_timesteps != 0:
                padding_size = n_timesteps - (data.shape[0] % n_timesteps)
                data = np.pad(data, ((0, padding_size), (0, 0)), mode='constant')
            return data

        X_train_preprocessed = pad_or_truncate(X_train_preprocessed, n_timesteps)
        X_test_preprocessed = pad_or_truncate(X_test_preprocessed, n_timesteps)

        # Group consecutive rows into non-overlapping windows of n_timesteps.
        X_train_preprocessed = X_train_preprocessed.reshape(-1, n_timesteps, n_features)
        X_test_preprocessed = X_test_preprocessed.reshape(-1, n_timesteps, n_features)

        # Align label vectors with the number of windows (see NOTE in docstring).
        y_train = y_train[:X_train_preprocessed.shape[0]]
        y_test = y_test[:X_test_preprocessed.shape[0]]

        model = Sequential([
            Input(shape=(n_timesteps, n_features)),
            LSTM(units=50, return_sequences=True),
            LSTM(units=50),
            Dense(1, activation='sigmoid')
        ])

        model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

        # Stream per-epoch metrics to the GUI through the Qt signal.
        callback = ModelProgressCallback(self.progress_signal, 'LSTM')
        history = model.fit(X_train_preprocessed, y_train, 
                          epochs=10, batch_size=32, verbose=1,
                          callbacks=[callback])

        # Predicted probabilities for the training and test sets.
        y_prob_train = model.predict(X_train_preprocessed).flatten()
        y_prob_test = model.predict(X_test_preprocessed).flatten()
        # Intermediate feature extraction: reuse every trained layer except the
        # sigmoid head, yielding the second LSTM layer's output as temporal features.
        input_tensor = Input(shape=(n_timesteps, n_features))
        x_out = input_tensor
        for layer in model.layers[:-1]:
            x_out = layer(x_out)
        extractor = KerasModel(inputs=input_tensor, outputs=x_out)
        feats_train = extractor.predict(X_train_preprocessed)
        feats_test  = extractor.predict(X_test_preprocessed)

        # Binarize test probabilities at the 0.5 threshold.
        y_pred = (y_prob_test > 0.5).astype('int32').flatten()
        
        results = {
            'model': 'LSTM',
            'mse': mean_squared_error(y_test, y_pred),
            'accuracy': accuracy_score(y_test, y_pred),
            'recall': recall_score(y_test, y_pred, average='weighted'),
            'precision': precision_score(y_test, y_pred, average='weighted'),
            'f1': f1_score(y_test, y_pred, average='weighted'),
            'confusion_matrix': confusion_matrix(y_test, y_pred),
            'history': history.history,
            'y_prob_train': y_prob_train,
            'y_prob_test': y_prob_test,
            'feats_train': feats_train,
            'feats_test': feats_test
        }
        return results

    async def run_bilstm(self, X_train, X_test, y_train, y_test, preprocessor, n_timesteps=30):
        """Train a 2-layer bidirectional LSTM for binary classification on
        windowed features.

        Rows are zero-padded to a multiple of n_timesteps and reshaped to
        (windows, n_timesteps, n_features).
        NOTE(review): labels are truncated to the window count, pairing each
        window with an early *row* label — confirm this alignment is intended.

        Returns a dict with test metrics, training history, train/test
        predicted probabilities, and penultimate-layer features.
        """
        # Fit the preprocessor on training data only. .toarray() assumes a
        # sparse matrix (OneHotEncoder output) — TODO confirm it is always sparse.
        X_train_preprocessed = preprocessor.fit_transform(X_train).toarray()
        X_test_preprocessed = preprocessor.transform(X_test).toarray()
        n_features = X_train_preprocessed.shape[1]

        def pad_or_truncate(data, n_timesteps):
            # Despite the name, this only zero-pads rows until the row count is
            # a multiple of n_timesteps; it never truncates.
            if data.shape[0] % n_timesteps != 0:
                padding_size = n_timesteps - (data.shape[0] % n_timesteps)
                data = np.pad(data, ((0, padding_size), (0, 0)), mode='constant')
            return data

        X_train_preprocessed = pad_or_truncate(X_train_preprocessed, n_timesteps)
        X_test_preprocessed = pad_or_truncate(X_test_preprocessed, n_timesteps)

        # Group consecutive rows into non-overlapping windows of n_timesteps.
        X_train_preprocessed = X_train_preprocessed.reshape(-1, n_timesteps, n_features)
        X_test_preprocessed = X_test_preprocessed.reshape(-1, n_timesteps, n_features)

        # Align label vectors with the number of windows (see NOTE in docstring).
        y_train = y_train[:X_train_preprocessed.shape[0]]
        y_test = y_test[:X_test_preprocessed.shape[0]]

        model = Sequential([
            Input(shape=(n_timesteps, n_features)),
            tf.keras.layers.Bidirectional(LSTM(units=50, return_sequences=True)),
            tf.keras.layers.Bidirectional(LSTM(units=50)),
            Dense(1, activation='sigmoid')
        ])

        model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

        # Stream per-epoch metrics to the GUI through the Qt signal.
        callback = ModelProgressCallback(self.progress_signal, 'BiLSTM')
        history = model.fit(X_train_preprocessed, y_train, 
                          epochs=5, batch_size=32, verbose=1,
                          callbacks=[callback])

        # Predicted probabilities for the training and test sets.
        y_prob_train = model.predict(X_train_preprocessed).flatten()
        y_prob_test = model.predict(X_test_preprocessed).flatten()
        # Intermediate feature extraction: reuse every trained layer except the
        # sigmoid head, yielding the penultimate (second BiLSTM) layer's output
        # as temporal features.
        input_tensor = Input(shape=(n_timesteps, n_features))
        x_out = input_tensor
        for layer in model.layers[:-1]:
            x_out = layer(x_out)
        extractor = KerasModel(inputs=input_tensor, outputs=x_out)
        feats_train = extractor.predict(X_train_preprocessed)
        feats_test  = extractor.predict(X_test_preprocessed)

        # Binarize test probabilities at the 0.5 threshold.
        y_pred = (y_prob_test > 0.5).astype('int32').flatten()
        
        results = {
            'model': 'BiLSTM',
            'mse': mean_squared_error(y_test, y_pred),
            'accuracy': accuracy_score(y_test, y_pred),
            'recall': recall_score(y_test, y_pred, average='weighted'),
            'precision': precision_score(y_test, y_pred, average='weighted'),
            'f1': f1_score(y_test, y_pred, average='weighted'),
            'confusion_matrix': confusion_matrix(y_test, y_pred),
            'history': history.history,
            'y_prob_train': y_prob_train,
            'y_prob_test': y_prob_test,
            'feats_train': feats_train,
            'feats_test': feats_test
        }
        return results

    # async def run_rnn(self, X_train, X_test, y_train, y_test, preprocessor, n_timesteps=30):
    #     X_train_preprocessed = preprocessor.fit_transform(X_train).toarray()
    #     X_test_preprocessed = preprocessor.transform(X_test).toarray()
    #     n_features = X_train_preprocessed.shape[1]
    #
    #     def pad_or_truncate(data, n_timesteps):
    #         if data.shape[0] % n_timesteps != 0:
    #             padding_size = n_timesteps - (data.shape[0] % n_timesteps)
    #             data = np.pad(data, ((0, padding_size), (0, 0)), mode='constant')
    #         return data
    #
    #     X_train_preprocessed = pad_or_truncate(X_train_preprocessed, n_timesteps)
    #     X_test_preprocessed = pad_or_truncate(X_test_preprocessed, n_timesteps)
    #
    #     X_train_preprocessed = X_train_preprocessed.reshape(-1, n_timesteps, n_features)
    #     X_test_preprocessed = X_test_preprocessed.reshape(-1, n_timesteps, n_features)
    #
    #     y_train = y_train[:X_train_preprocessed.shape[0]]
    #     y_test = y_test[:X_test_preprocessed.shape[0]]
    #
    #
    #     model = Sequential([
    #         Input(shape=(n_timesteps, n_features)),
    #         tf.keras.layers.SimpleRNN(units=50, return_sequences=True),
    #         tf.keras.layers.SimpleRNN(units=50),
    #         Dense(1, activation='sigmoid')
    #     ])
    #
    #     model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    #
    #     callback = ModelProgressCallback(self.progress_signal, 'RNN')
    #     history = model.fit(X_train_preprocessed, y_train,
    #                       epochs=20, batch_size=32, verbose=1,
    #                       callbacks=[callback])
    #
    #     # 获取训练集与测试集预测概率
    #     y_prob_train = model.predict(X_train_preprocessed).flatten()
    #     y_prob_test = model.predict(X_test_preprocessed).flatten()
    #     # 中间特征提取 (取倒数第二层 RNN 输出作为时序特征)
    #     input_tensor = Input(shape=(n_timesteps, n_features))
    #     x_out = input_tensor
    #     for layer in model.layers[:-1]:
    #         x_out = layer(x_out)
    #     extractor = KerasModel(inputs=input_tensor, outputs=x_out)
    #     feats_train = extractor.predict(X_train_preprocessed)
    #     feats_test  = extractor.predict(X_test_preprocessed)
    #
    #     y_pred = (y_prob_test > 0.5).astype('int32').flatten()
    #
    #     results = {
    #         'model': 'RNN',
    #         'mse': mean_squared_error(y_test, y_pred),
    #         'accuracy': accuracy_score(y_test, y_pred),
    #         'recall': recall_score(y_test, y_pred, average='weighted'),
    #         'precision': precision_score(y_test, y_pred, average='weighted'),
    #         'f1': f1_score(y_test, y_pred, average='weighted'),
    #         'confusion_matrix': confusion_matrix(y_test, y_pred),
    #         'history': history.history,
    #         'y_prob_train': y_prob_train,
    #         'y_prob_test': y_prob_test,
    #         'feats_train': feats_train,
    #         'feats_test': feats_test
    #     }
    #     return results

    async def run_hmm_bilstm_fusion(self, X_train, X_test, y_train, y_test, preprocessor, n_timesteps=30):
        """HMM-BiLSTM fusion model: an HMM's decoded hidden state is appended
        as an extra feature column, and a bidirectional LSTM is trained on the
        augmented, windowed features for binary classification.

        NOTE(review): labels are truncated to the window count after
        reshaping, pairing each window with an early *row* label — confirm
        this alignment is intended.

        Returns a dict with test metrics, training history, train/test
        predicted probabilities, and penultimate-layer features.
        """
        # Fit the preprocessor on training data only. .toarray() assumes a
        # sparse matrix (OneHotEncoder output) — TODO confirm it is always sparse.
        X_train_preprocessed = preprocessor.fit_transform(X_train).toarray()
        X_test_preprocessed = preprocessor.transform(X_test).toarray()
        n_features = X_train_preprocessed.shape[1]

        # Step 1: train an HMM (unsupervised) to obtain hidden states.
        hmm_model = hmm.GaussianHMM(n_components=3, covariance_type="full", n_iter=200)
        hmm_model.fit(X_train_preprocessed)
        
        # Decode hidden-state sequences for both splits.
        hidden_states_train = hmm_model.predict(X_train_preprocessed)
        hidden_states_test = hmm_model.predict(X_test_preprocessed)
        
        # Append the decoded hidden state as an additional feature column.
        X_train_with_states = np.column_stack((X_train_preprocessed, hidden_states_train.reshape(-1, 1)))
        X_test_with_states = np.column_stack((X_test_preprocessed, hidden_states_test.reshape(-1, 1)))
        
        # Reshape the data into windows for the BiLSTM.
        def pad_or_truncate(data, n_timesteps):
            # Despite the name, this only zero-pads rows until the row count is
            # a multiple of n_timesteps; it never truncates.
            if data.shape[0] % n_timesteps != 0:
                padding_size = n_timesteps - (data.shape[0] % n_timesteps)
                data = np.pad(data, ((0, padding_size), (0, 0)), mode='constant')
            return data

        X_train_with_states = pad_or_truncate(X_train_with_states, n_timesteps)
        X_test_with_states = pad_or_truncate(X_test_with_states, n_timesteps)

        X_train_with_states = X_train_with_states.reshape(-1, n_timesteps, n_features + 1)
        X_test_with_states = X_test_with_states.reshape(-1, n_timesteps, n_features + 1)

        # Align label vectors with the number of windows (see NOTE in docstring).
        y_train = y_train[:X_train_with_states.shape[0]]
        y_test = y_test[:X_test_with_states.shape[0]]

        # Build the fusion model.
        model = Sequential([
            Input(shape=(n_timesteps, n_features + 1)),  # +1 for hidden states
            tf.keras.layers.Bidirectional(LSTM(units=64, return_sequences=True)),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.Bidirectional(LSTM(units=32)),
            tf.keras.layers.Dropout(0.2),
            Dense(16, activation='relu'),
            Dense(1, activation='sigmoid')
        ])

        # Compile the model.
        model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
        
        # Train, streaming per-epoch metrics to the GUI through the Qt signal.
        callback = ModelProgressCallback(self.progress_signal, 'HMM-BiLSTM')
        history = model.fit(X_train_with_states, y_train, 
                          epochs=10, batch_size=32, verbose=1,
                          callbacks=[callback])

        # Predicted probabilities for the training and test sets.
        y_prob_train = model.predict(X_train_with_states).flatten()
        y_prob_test = model.predict(X_test_with_states).flatten()
        # Intermediate feature extraction: reuse every trained layer except the
        # sigmoid head, yielding the penultimate layer's output as temporal features.
        input_tensor = Input(shape=(n_timesteps, n_features + 1))
        x_out = input_tensor
        for layer in model.layers[:-1]:
            x_out = layer(x_out)
        extractor = KerasModel(inputs=input_tensor, outputs=x_out)
        feats_train = extractor.predict(X_train_with_states)
        feats_test  = extractor.predict(X_test_with_states)

        # Predict (0.5 threshold) and evaluate on the test split.
        y_pred = (model.predict(X_test_with_states) > 0.5).astype('int32').flatten()
        
        results = {
            'model': 'HMM-BiLSTM',
            'mse': mean_squared_error(y_test, y_pred),
            'accuracy': accuracy_score(y_test, y_pred),
            'recall': recall_score(y_test, y_pred, average='weighted'),
            'precision': precision_score(y_test, y_pred, average='weighted'),
            'f1': f1_score(y_test, y_pred, average='weighted'),
            'confusion_matrix': confusion_matrix(y_test, y_pred),
            'history': history.history,
            'y_prob_train': y_prob_train,
            'y_prob_test': y_prob_test,
            'feats_train': feats_train,
            'feats_test': feats_test
        }
        return results

    async def run_bilstm_hmm_ensemble(self, X_train, X_test, y_train, y_test, preprocessor, n_timesteps=30):
        """Simple fusion: run BiLSTM then HMM, average scalar metrics, and sum
        confusion matrices.

        Fix: the original merged matrices with `cm1 or cm2`, which raises
        ValueError ("truth value of an array ... is ambiguous") whenever cm1
        is a real multi-element ndarray and cm2 is None; explicit None checks
        are used instead.
        NOTE(review): summing assumes both confusion matrices have the same
        shape (same class count) — confirm for BiLSTM (binary) vs HMM.
        """
        res1 = await self.run_bilstm(X_train, X_test, y_train, y_test, preprocessor, n_timesteps=n_timesteps)
        res2 = await self.run_hmm(X_train, X_test, y_train, y_test, preprocessor, n_timesteps=n_timesteps)
        merged = {'model': 'BiLSTM+HMM'}
        for key in ['mse', 'accuracy', 'recall', 'precision', 'f1']:
            v1 = res1.get(key, 0) or 0  # `or 0` maps missing/None metrics to 0
            v2 = res2.get(key, 0) or 0
            merged[key] = (v1 + v2) / 2
        cm1 = res1.get('confusion_matrix')
        cm2 = res2.get('confusion_matrix')
        if cm1 is None:
            merged['confusion_matrix'] = cm2
        elif cm2 is None:
            merged['confusion_matrix'] = cm1
        else:
            merged['confusion_matrix'] = cm1 + cm2
        return merged

    async def run_lstm_cnn_ensemble(self, X_train, X_test, y_train, y_test, preprocessor, n_timesteps=30):
        """Simple fusion: run LSTM then CNN, average scalar metrics, and sum
        confusion matrices.

        Fix: the original merged matrices with `cm1 or cm2`, which raises
        ValueError ("truth value of an array ... is ambiguous") whenever cm1
        is a real multi-element ndarray and cm2 is None; explicit None checks
        are used instead.
        """
        res1 = await self.run_lstm(X_train, X_test, y_train, y_test, preprocessor, n_timesteps=n_timesteps)
        res2 = await self.run_cnn(X_train, X_test, y_train, y_test, preprocessor)
        merged = {'model': 'LSTM+CNN'}
        for key in ['mse', 'accuracy', 'recall', 'precision', 'f1']:
            v1 = res1.get(key, 0) or 0  # `or 0` maps missing/None metrics to 0
            v2 = res2.get(key, 0) or 0
            merged[key] = (v1 + v2) / 2
        cm1 = res1.get('confusion_matrix')
        cm2 = res2.get('confusion_matrix')
        if cm1 is None:
            merged['confusion_matrix'] = cm2
        elif cm2 is None:
            merged['confusion_matrix'] = cm1
        else:
            merged['confusion_matrix'] = cm1 + cm2
        return merged

    async def run_bilstm_cnn_ensemble(self, X_train, X_test, y_train, y_test, preprocessor, n_timesteps=30):
        """Simple fusion: run BiLSTM then CNN, average scalar metrics, and sum
        confusion matrices.

        Fix: the original merged matrices with `cm1 or cm2`, which raises
        ValueError ("truth value of an array ... is ambiguous") whenever cm1
        is a real multi-element ndarray and cm2 is None; explicit None checks
        are used instead.
        """
        res1 = await self.run_bilstm(X_train, X_test, y_train, y_test, preprocessor, n_timesteps=n_timesteps)
        res2 = await self.run_cnn(X_train, X_test, y_train, y_test, preprocessor)
        merged = {'model': 'BiLSTM+CNN'}
        for key in ['mse', 'accuracy', 'recall', 'precision', 'f1']:
            v1 = res1.get(key, 0) or 0  # `or 0` maps missing/None metrics to 0
            v2 = res2.get(key, 0) or 0
            merged[key] = (v1 + v2) / 2
        cm1 = res1.get('confusion_matrix')
        cm2 = res2.get('confusion_matrix')
        if cm1 is None:
            merged['confusion_matrix'] = cm2
        elif cm2 is None:
            merged['confusion_matrix'] = cm1
        else:
            merged['confusion_matrix'] = cm1 + cm2
        return merged

    # async def run_lstm_rnn_ensemble(self, X_train, X_test, y_train, y_test, preprocessor, n_timesteps=30):
    #     """简单融合：先 LSTM 再 RNN，指标平均，混淆矩阵叠加"""
    #     res1 = await self.run_lstm(X_train, X_test, y_train, y_test, preprocessor, n_timesteps=n_timesteps)
    #     res2 = await self.run_rnn(X_train, X_test, y_train, y_test, preprocessor, n_timesteps=n_timesteps)
    #     merged = {'model': 'LSTM+RNN'}
    #     for key in ['mse','accuracy','recall','precision','f1']:
    #         v1 = res1.get(key, 0) or 0
    #         v2 = res2.get(key, 0) or 0
    #         merged[key] = (v1 + v2) / 2
    #     cm1 = res1.get('confusion_matrix')
    #     cm2 = res2.get('confusion_matrix')
    #     merged['confusion_matrix'] = cm1 + cm2 if cm1 is not None and cm2 is not None else cm1 or cm2
    #     return merged

    # async def run_bilstm_rnn_ensemble(self, X_train, X_test, y_train, y_test, preprocessor, n_timesteps=30):
    #     """简单融合：先 BiLSTM 再 RNN，指标平均，混淆矩阵叠加"""
    #     res1 = await self.run_bilstm(X_train, X_test, y_train, y_test, preprocessor, n_timesteps=n_timesteps)
    #     res2 = await self.run_rnn(X_train, X_test, y_train, y_test, preprocessor, n_timesteps=n_timesteps)
    #     merged = {'model': 'BiLSTM+RNN'}
    #     for key in ['mse','accuracy','recall','precision','f1']:
    #         v1 = res1.get(key, 0) or 0
    #         v2 = res2.get(key, 0) or 0
    #         merged[key] = (v1 + v2) / 2
    #     cm1 = res1.get('confusion_matrix')
    #     cm2 = res2.get('confusion_matrix')
    #     merged['confusion_matrix'] = cm1 + cm2 if cm1 is not None and cm2 is not None else cm1 or cm2
    #     return merged

    # async def run_stacking_fusion(self, X_train, X_test, y_train, y_test, preprocessor, n_timesteps=30):
    #     # 发出开始融合提示
    #     self.progress_signal.emit({'model': 'Stacking', 'status': '开始进行模型融合'})
    #     """简化堆叠融合：对各基础模型的预测概率取平均作为最终预测"""
    #     # 先运行各基础模型，收集测试集预测概率
    #     base_funcs = [self.run_cnn, self.run_hmm, self.run_lstm, self.run_bilstm, self.run_rnn]
    #     probs_list = []
    #     for func in base_funcs:
    #         res = await func(X_train, X_test, y_train, y_test, preprocessor, n_timesteps=n_timesteps)
    #         probs_list.append(res.get('y_prob_test'))
    #     import numpy as np
    #     # 对齐各模型输出长度，使用最小长度截断
    #     lengths = [len(p) for p in probs_list]
    #     min_len = min(lengths)
    #     truncated = [p[:min_len] for p in probs_list]
    #     stacked_probs = np.mean(np.column_stack(truncated), axis=1)
    #     # 与 y_test 同步截断用于评估
    #     y_test_trunc = y_test[:min_len]
    #     y_pred = (stacked_probs > 0.5).astype(int)
    #     # 计算评估指标
    #     from sklearn.metrics import confusion_matrix, mean_squared_error, accuracy_score, recall_score, precision_score, f1_score
    #     cm = confusion_matrix(y_test_trunc, y_pred)
    #     mse = mean_squared_error(y_test_trunc, y_pred)
    #     acc = accuracy_score(y_test_trunc, y_pred)
    #     rec = recall_score(y_test_trunc, y_pred, average='weighted')
    #     prec = precision_score(y_test_trunc, y_pred, average='weighted')
    #     f1 = f1_score(y_test_trunc, y_pred, average='weighted')
    #     return {'model': 'Stacking', 'mse': mse, 'accuracy': acc, 'recall': rec, 'precision': prec, 'f1': f1, 'confusion_matrix': cm}

    # async def run_feature_level_fusion(self, X_train, X_test, y_train, y_test, preprocessor, n_timesteps=30):
    #     # 发出开始融合提示
    #     self.progress_signal.emit({'model': 'FeatureFusion', 'status': '开始进行模型融合'})
    #     """特征级融合（Feature-Level Fusion）：拼接各模型中间层特征，再训练分类器。"""
    #     # TODO: 实现中间层特征提取，目前示例直接使用预处理特征作为占位
    #     X_train_ft = preprocessor.fit_transform(X_train).toarray()
    #     X_test_ft  = preprocessor.transform(X_test).toarray()
    #     from sklearn.ensemble import RandomForestClassifier
    #     clf = RandomForestClassifier(n_estimators=50, random_state=42)
    #     clf.fit(X_train_ft, y_train)
    #     y_pred = clf.predict(X_test_ft)
    #     from sklearn.metrics import confusion_matrix, mean_squared_error, accuracy_score, recall_score, precision_score, f1_score
    #     cm   = confusion_matrix(y_test, y_pred)
    #     mse  = mean_squared_error(y_test, y_pred)
    #     acc  = accuracy_score(y_test, y_pred)
    #     rec  = recall_score(y_test, y_pred, average='weighted')
    #     prec = precision_score(y_test, y_pred, average='weighted')
    #     f1   = f1_score(y_test, y_pred, average='weighted')
    #     return {'model': 'FeatureFusion', 'mse': mse, 'accuracy': acc, 'recall': rec, 'precision': prec, 'f1': f1, 'confusion_matrix': cm}

    # async def run_experts_moe(self, X_train, X_test, y_train, y_test, preprocessor, n_timesteps=30):
    #     # 发出开始融合提示
    #     self.progress_signal.emit({'model': 'MoE', 'status': '开始门控专家融合'})
    #     """Mixture of Experts：用门控网络动态加权专家预测概率"""
    #     import numpy as np
    #     # 1. 收集各专家测试集预测概率
    #     base_funcs = [self.run_cnn, self.run_hmm, self.run_lstm, self.run_bilstm, self.run_rnn]
    #     base_results = [await func(X_train, X_test, y_train, y_test, preprocessor, n_timesteps=n_timesteps)
    #                     for func in base_funcs]
    #     train_probs_list = [r['y_prob_train'] for r in base_results]
    #     test_probs_list  = [r['y_prob_test']  for r in base_results]
    #     # 对齐长度
    #     min_train = min(len(p) for p in train_probs_list)
    #     min_test  = min(len(p) for p in test_probs_list)
    #     train_probs = np.column_stack([p[:min_train] for p in train_probs_list])
    #     test_probs  = np.column_stack([p[:min_test]  for p in test_probs_list])
    #     y_train_trunc = y_train[:min_train]
    #     y_test_trunc  = y_test[:min_test]
    #     # 2. 构建门控网络
    #     import tensorflow as tf
    #     from tensorflow.keras.layers import Input, Dense, Multiply, Lambda
    #     from tensorflow.keras.models import Model as KerasModel
    #     inp = Input(shape=(train_probs.shape[1],))
    #     gate = Dense(train_probs.shape[1], activation='softmax')(inp)
    #     mult= Multiply()([inp, gate])
    #     fused = Lambda(lambda x: tf.reduce_sum(x, axis=1, keepdims=False))(mult)
    #     gating_model = KerasModel(inp, fused)
    #     gating_model.compile(optimizer='adam', loss='binary_crossentropy')
    #     # 3. 训练门控网络
    #     gating_model.fit(train_probs, y_train_trunc, epochs=5, batch_size=32, verbose=0)
    #     # 4. 测试集融合预测
    #     fused_prob = gating_model.predict(test_probs).flatten()
    #     y_pred = (fused_prob > 0.5).astype(int)
    #     # 5. 评估指标
    #     from sklearn.metrics import confusion_matrix, mean_squared_error, accuracy_score, recall_score, precision_score, f1_score
    #     cm  = confusion_matrix(y_test_trunc, y_pred)
    #     mse = mean_squared_error(y_test_trunc, y_pred)
    #     acc = accuracy_score(y_test_trunc, y_pred)
    #     rec = recall_score(y_test_trunc, y_pred, average='weighted')
    #     prec= precision_score(y_test_trunc, y_pred, average='weighted')
    #     f1 = f1_score(y_test_trunc, y_pred, average='weighted')
    #     return {'model': 'MoE', 'mse': mse, 'accuracy': acc, 'recall': rec, 'precision': prec, 'f1': f1, 'confusion_matrix': cm}

    # async def run_rl_fusion(self, X_train, X_test, y_train, y_test, preprocessor, n_timesteps=30):
    #     # 发出开始融合提示
    #     self.progress_signal.emit({'model': 'RL-Fusion', 'status': '开始进行模型融合'})
    #     """基于强化学习的融合（Reinforcement Learning-based Fusion）：代理学习专家权重策略。"""
    #     # TODO: 实现 RL 训练，目前使用递增权重占位
    #     base_funcs = [self.run_cnn, self.run_hmm, self.run_lstm, self.run_bilstm, self.run_rnn]
    #     base_results = []
    #     for func in base_funcs:
    #         res = await func(X_train, X_test, y_train, y_test, preprocessor, n_timesteps=n_timesteps)
    #         base_results.append(res)
    #     import numpy as np
    #     # 对齐各模型输出概率长度
    #     probs_list = [r['y_prob_test'] for r in base_results]
    #     lengths = [len(p) for p in probs_list]
    #     min_len = min(lengths)
    #     truncated = [p[:min_len] for p in probs_list]
    #     probs = np.column_stack(truncated)
    #     # 简单强化学习占位：线性增长权重
    #     weights = np.linspace(0.1, 1.0, probs.shape[1])
    #     weights = weights / weights.sum()
    #     final_prob = probs.dot(weights)
    #     # 截断 y_test 同步评估
    #     y_test_trunc = y_test[:min_len]
    #     y_pred = (final_prob > 0.5).astype(int)
    #     from sklearn.metrics import confusion_matrix, mean_squared_error, accuracy_score, recall_score, precision_score, f1_score
    #     cm = confusion_matrix(y_test_trunc, y_pred)
    #     mse = mean_squared_error(y_test_trunc, y_pred)
    #     acc = accuracy_score(y_test_trunc, y_pred)
    #     rec = recall_score(y_test_trunc, y_pred, average='weighted')
    #     prec = precision_score(y_test_trunc, y_pred, average='weighted')
    #     f1 = f1_score(y_test_trunc, y_pred, average='weighted')
    #     return {'model': 'RL-Fusion', 'mse': mse, 'accuracy': acc, 'recall': rec, 'precision': prec, 'f1': f1, 'confusion_matrix': cm}

    async def compare_models(self, data):
        """Train and evaluate every enabled model concurrently and emit the results.

        Parameters
        ----------
        data : DataFrame-like
            Must contain a ``'Severity'`` column (the label) plus whatever
            feature columns ``prepare_features_and_labels`` consumes.
            # assumes a pandas DataFrame with column access by name — TODO confirm

        Returns
        -------
        list
            One result dict per model, in task order, as produced by
            ``run_model`` (metrics such as accuracy / f1 / confusion matrix).

        Raises
        ------
        Exception
            Any error from feature preparation, splitting, or a model task
            propagates unchanged to the caller.

        Side effects
        ------------
        Emits ``training_completed_signal`` with ``{'results': results}``
        once all model tasks have finished.
        """
        preprocessor, features = self.prepare_features_and_labels(data)
        # Encode string severity labels into integer class ids.
        label_encoder = LabelEncoder()
        labels = label_encoder.fit_transform(data['Severity'])
        # Class-imbalance handling via RandomOverSampler is currently disabled.
        # NOTE(review): if imbalance matters, consider stratify=labels in the
        # split below — confirm downstream result expectations first.
        # ros = RandomOverSampler(random_state=42)
        # features_resampled, labels_resampled = ros.fit_resample(features, labels)
        X_train, X_test, y_train, y_test = train_test_split(
            features, labels, test_size=0.2, random_state=42)

        # One coroutine per model; disabled runners are kept commented out
        # for easy re-enabling.
        tasks = [
            self.run_model('CNN', self.run_cnn, X_train, X_test, y_train, y_test, preprocessor),
            self.run_model('HMM', self.run_hmm, X_train, X_test, y_train, y_test, preprocessor),
            self.run_model('LSTM', self.run_lstm, X_train, X_test, y_train, y_test, preprocessor),
            self.run_model('BiLSTM', self.run_bilstm, X_train, X_test, y_train, y_test, preprocessor),
            # self.run_model('RNN', self.run_rnn, X_train, X_test, y_train, y_test, preprocessor),
            self.run_model('HMM-BiLSTM', self.run_hmm_bilstm_fusion, X_train, X_test, y_train, y_test, preprocessor),
            self.run_model('BiLSTM+HMM', self.run_bilstm_hmm_ensemble, X_train, X_test, y_train, y_test, preprocessor),
            self.run_model('LSTM+CNN', self.run_lstm_cnn_ensemble, X_train, X_test, y_train, y_test, preprocessor),
            self.run_model('BiLSTM+CNN', self.run_bilstm_cnn_ensemble, X_train, X_test, y_train, y_test, preprocessor),
            # self.run_model('LSTM+RNN', self.run_lstm_rnn_ensemble, X_train, X_test, y_train, y_test, preprocessor),
            # self.run_model('BiLSTM+RNN', self.run_bilstm_rnn_ensemble, X_train, X_test, y_train, y_test, preprocessor),
            # self.run_model('Stacking', self.run_stacking_fusion, X_train, X_test, y_train, y_test, preprocessor),
            # self.run_model('FeatureFusion', self.run_feature_level_fusion, X_train, X_test, y_train, y_test, preprocessor),
            # self.run_model('MoE', self.run_experts_moe, X_train, X_test, y_train, y_test, preprocessor),
            # self.run_model('RL-Fusion', self.run_rl_fusion, X_train, X_test, y_train, y_test, preprocessor)
        ]

        # Run all model tasks concurrently; gather preserves task order.
        # The previous `try/except Exception as e: raise e` wrapper was a
        # no-op (re-raise with an extra traceback frame) and was removed —
        # exceptions still propagate to the caller unchanged.
        results = await asyncio.gather(*tasks)
        self.training_completed_signal.emit({'results': results})
        return results