from common import np, pd, plt, sns
from data_processor import generate_data, extract_data, windowing_data, window_data, extract_window_label, \
    extract_statistical_features
import ModelPersister
import logging
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from tensorflow.keras.optimizers import Adam
import lightgbm as lgb

# Module-wide logger: emits DEBUG-and-above records to the console.
logger = logging.getLogger('PdM')
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
logger.addHandler(console)
logger.setLevel(logging.DEBUG)


class PdM:
    """Predictive-maintenance (PdM) classification toolkit.

    Bundles dataset preparation, model construction, training, prediction,
    evaluation and visualization for two classifier families:

    * a stacked LSTM over raw standardized sensor windows, and
    * a LightGBM classifier over per-window statistical features.
    """

    def __init__(self, feature_keys: tuple[str, ...], label_key: str, label_alias: tuple[str, ...], window_size: int = 10):
        """
        :param feature_keys: column names of the sensor features used as input.
        :param label_key: column name of the integer-coded failure label.
        :param label_alias: display names for each class, indexed by class id.
        :param window_size: number of consecutive time steps per window.
        """
        self.feature_keys = feature_keys
        self.label_key = label_key
        self.label_alias = label_alias
        self.window_size = window_size

    def training_classification_lstm(self, X, y,
                                     test_size=0.3, epochs=50, batch_size=32,
                                     evaluate: bool = False):
        """Train the LSTM classifier on windowed sensor data.

        :param X: 3-D array shaped (samples, window_size, feature_count)::

            np.array([
                        # window sample 1
                        [
                            [t1_sensor_a, t1_sensor_b, t1_sensor_c, t1_sensor_d],  # time step 1
                            [t2_sensor_a, t2_sensor_b, t2_sensor_c, t2_sensor_d],  # time step 2
                            ...
                        ],
                        # window sample 2
                        [
                            [t1_sensor_a, t1_sensor_b, t1_sensor_c, t1_sensor_d],
                            [t2_sensor_a, t2_sensor_b, t2_sensor_c, t2_sensor_d],
                            ...
                        ],
                        ...
                    ])
        :param y: 1-D array of integer class labels, one per window.
        :param test_size: fraction of the data held out for validation.
        :param epochs: number of training epochs.
        :param batch_size: mini-batch size.
        :param evaluate: when True, print metrics/plots on the held-out split.
        :return: the fitted Keras model.
        """
        # Stratify so both splits keep the original class proportions.
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42, stratify=y)

        # Derive feature/class counts from the data rather than hard-coding
        # 4/4, so the same code works for any sensor/label configuration.
        model = self.build_model_classification_lstm(X.shape[2], len(np.unique(y)))

        # Fit; the held-out split doubles as per-epoch validation data.
        model.fit(
            X_train, y_train,
            epochs=epochs,
            batch_size=batch_size,
            validation_data=(X_test, y_test),
            verbose=1
        )
        if evaluate:
            self.evaluate_model_lstm(model, X_test, y_test, self.label_alias)
        return model

    def training_classification_lightgbm(self, X, y,
                                         test_size=0.3,
                                         evaluate: bool = False):
        """Train the LightGBM classifier on flattened/derived window features.

        :param X: 2-D array shaped (samples, feature_count). Each row is
            either a window flattened with ``flatten()`` (e.g. 10 steps x 4
            sensors = 40 features) or features derived from a window (such as
            statistical features)::

            np.array([
                        # sample 1:
                        [feature1, feature2, feature3, ..., feature_n],
                        # sample 2
                        [feature1, feature2, feature3, ..., feature_n],
                        # more samples...
                    ])
        :param y: 1-D array of integer class labels, one per window.
        :param test_size: fraction of the data held out for validation.
        :param evaluate: when True, print metrics/plots on the held-out split.
        :return: the fitted LGBMClassifier.
        """
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42, stratify=y)

        # Derive the class count from the labels instead of hard-coding 4.
        model = self.build_model_classification_lightgbm(len(np.unique(y)))

        model.fit(
            X_train, y_train,
            eval_set=[(X_test, y_test)],
            eval_metric='multi_logloss',
            callbacks=[
                # Stop when the eval metric hasn't improved for 50 rounds.
                lgb.early_stopping(stopping_rounds=50, verbose=True),
                lgb.log_evaluation(period=50)
            ]
        )
        if evaluate:
            self.evaluate_model_lightgbm(model, X_test, y_test, self.label_alias)
        return model

    def build_datasets_classification_lstm(self, datasets: pd.DataFrame) -> tuple[
        StandardScaler, np.ndarray, np.ndarray]:
        """Build (scaler, windowed features, window labels) from a raw frame.

        :param datasets: frame containing ``self.feature_keys`` and
            ``self.label_key`` columns.
        :return: (fitted StandardScaler, X shaped (n, window_size, features),
            y shaped (n,)).
        """
        features, labels = extract_data(datasets, self.label_key, self.feature_keys)
        # Standardize training data with fit_transform:
        #   fit: compute per-feature mean and standard deviation
        #   transform: (value - mean) / std for every feature value
        scaler = StandardScaler()
        features_scaled = scaler.fit_transform(features)

        # Slice features into windows.
        X = windowing_data(features_scaled, self.window_size)
        # Collapse each window of per-step labels into a single window label.
        y = []
        windowing_labels = windowing_data(labels, self.window_size)
        for window_labels in windowing_labels:
            y.append(extract_window_label(window_labels))

        return scaler, X, np.array(y)

    def build_datasets_classification_lightgbm(self, datasets: pd.DataFrame) -> tuple[np.ndarray, np.ndarray]:
        """Build (per-window statistical features, window labels) from a frame.

        Side effect: ``self.feature_keys`` is replaced with the statistical
        feature column names so feature-importance reports use them.
        """
        features, labels = extract_data(datasets, self.label_key, self.feature_keys)

        X = []
        y = []
        windowing_features = windowing_data(features, self.window_size)
        for window_features in windowing_features:
            statistical_features = extract_statistical_features(window_features)
            # Remember the derived column names for feature-importance reports.
            self.feature_keys = statistical_features.columns
            X.append(statistical_features.to_numpy()[0])
        # BUG FIX: previously used the bare global `window_size`, which only
        # worked when this module ran as a script; use the instance setting.
        windowing_labels = windowing_data(labels, self.window_size)
        for window_labels in windowing_labels:
            y.append(extract_window_label(window_labels))
        return np.array(X), np.array(y)

    def build_model_classification_lstm(self, feature_number: int = 4, label_number: int = 4):
        """Build and compile the stacked-LSTM classifier.

        :param feature_number: sensors per time step (input width).
        :param label_number: number of output classes.
        :return: a compiled Keras Sequential model.
        """
        model = Sequential([
            # First LSTM layer over (window_size, feature_number) inputs.
            LSTM(64, return_sequences=True, input_shape=(self.window_size, feature_number)),
            Dropout(0.2),

            # Second LSTM layer collapses the sequence to a single vector.
            LSTM(32, return_sequences=False),
            Dropout(0.2),

            # Fully connected layer.
            Dense(32, activation='relu'),
            Dropout(0.1),

            # Output layer: one softmax unit per failure class.
            Dense(label_number, activation='softmax')
        ])

        # sparse_categorical_crossentropy: labels are integer class ids,
        # not one-hot vectors.
        model.compile(
            optimizer=Adam(learning_rate=0.001),
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy']
        )
        return model

    def build_model_classification_lightgbm(self, label_number: int = 4):
        """Build the multiclass LightGBM classifier.

        :param label_number: number of output classes.
        :return: an unfitted LGBMClassifier.
        """
        model = lgb.LGBMClassifier(
            objective='multiclass',
            num_class=label_number,
            n_estimators=500,
            learning_rate=0.05,
            num_leaves=31,
            max_depth=6,
            subsample=0.8,
            colsample_bytree=0.8,
            reg_alpha=0.1,
            reg_lambda=0.1,
            random_state=42,
            class_weight='balanced'  # compensate for class imbalance
        )
        return model

    def predict_classification_lstm(self, model, scaler, features: np.ndarray):
        """Classify one window of raw sensor rows with the LSTM model.

        :param model: trained Keras model.
        :param scaler: StandardScaler fitted during dataset building.
        :param features: 2-D array shaped (window_size, feature_count).
        :return: dict with 'predicted_class' (int) and 'probabilities'
            (class index -> probability).
        """
        # Prediction data must reuse training statistics: transform only.
        features_scaled = scaler.transform(features)

        feature_number = features.shape[1]
        # One batch of one window: (1, window_size, feature_count).
        input_data = features_scaled.reshape(1, self.window_size, feature_number)

        # Softmax probabilities for the single window; argmax picks the class.
        probabilities = model.predict(input_data, verbose=0)[0]
        predicted_class = np.argmax(probabilities)

        return {
            'predicted_class': int(predicted_class),
            'probabilities': {
                idx: float(probability) for idx, probability in enumerate(probabilities)
            }
        }

    def predict_classification_lightgbm(self, model, features: np.ndarray):
        """Classify one sample of window features with the LightGBM model.

        :param model: trained LGBMClassifier.
        :param features: 2-D array with a single row of window features.
        :return: dict with 'predicted_class' (int) and 'probabilities'
            (class index -> probability).
        """
        predicted_class = model.predict(features)[0]
        probabilities = model.predict_proba(features)[0]

        return {
            'predicted_class': int(predicted_class),
            'probabilities': {
                idx: float(probability) for idx, probability in enumerate(probabilities)
            }
        }

    def evaluate_model_lstm(self, model, X_test, y_test, label_alias):
        """Print a classification report, confusion-matrix heatmap and
        per-class accuracy for a trained LSTM model."""
        # Keras returns per-class probabilities; argmax yields the class id.
        y_pred = np.argmax(model.predict(X_test), axis=1)

        print("\nModel evaluation results:")
        print(classification_report(y_test, y_pred, target_names=label_alias))

        # Confusion-matrix heatmap.
        cm = confusion_matrix(y_test, y_pred)
        plt.figure(figsize=(10, 8))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=label_alias, yticklabels=label_alias)
        plt.title('混淆矩阵')
        plt.xlabel('预测标签')
        plt.ylabel('真实标签')
        plt.show()

        # Per-class accuracy = diagonal / row sum (i.e. per-class recall).
        class_accuracies = cm.diagonal() / cm.sum(axis=1)
        print("各分类准确率:")
        for i, accuracy in enumerate(class_accuracies):
            print(f"    {label_alias[i]} : {accuracy:.4f}")

    def evaluate_model_lightgbm(self, model, X_test, y_test, label_alias):
        """Print a classification report, confusion-matrix heatmap, per-class
        accuracy and feature importance for a trained LightGBM model."""
        y_pred = model.predict(X_test)

        print("\nModel evaluation results:")
        print(classification_report(y_test, y_pred, target_names=label_alias))

        # Confusion-matrix heatmap.
        cm = confusion_matrix(y_test, y_pred)
        plt.figure(figsize=(10, 8))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=label_alias, yticklabels=label_alias)
        plt.title('混淆矩阵')
        plt.xlabel('预测标签')
        plt.ylabel('真实标签')
        plt.show()

        # Per-class accuracy = diagonal / row sum (i.e. per-class recall).
        class_accuracies = cm.diagonal() / cm.sum(axis=1)
        print("各分类准确率:")
        for i, accuracy in enumerate(class_accuracies):
            print(f"    {label_alias[i]} : {accuracy:.4f}")

        # Feature importance, highest first. Relies on self.feature_keys set
        # by build_datasets_classification_lightgbm.
        feature_importance = pd.DataFrame({
            'Feature': self.feature_keys,
            'Importance': model.feature_importances_
        }).sort_values('Importance', ascending=False)

        print("\nTop 10重要特征:")
        print(feature_importance.head(10))

        # Visualize the top-20 feature importances.
        plt.figure(figsize=(10, 8))
        plt.barh(feature_importance['Feature'].head(20), feature_importance['Importance'].head(20))
        plt.xlabel('Importance')
        plt.title('Top 20 Feature Importance')
        plt.tight_layout()
        plt.show()

    # BUG FIX: declared without `self`, so instance calls raised TypeError;
    # made a staticmethod (class-level calls keep working unchanged).
    @staticmethod
    def visualize_data(device_data):
        """Plot the four sensor series with failure points marked, then the
        failure-type distribution, for one device's DataFrame."""
        plt.figure(figsize=(15, 12))

        # Plot each sensor series in its own subplot.
        sensors = ['sensor_a', 'sensor_b', 'sensor_c', 'sensor_d']
        colors = ['blue', 'green', 'red', 'orange']

        for i, (sensor, color) in enumerate(zip(sensors, colors)):
            plt.subplot(2, 2, i + 1)
            plt.plot(device_data['timestamp'], device_data[sensor], color=color, alpha=0.7)

            # Mark failure timestamps with dashed vertical lines.
            failure_points = device_data[device_data['failure_type'] > 0]
            for _, row in failure_points.iterrows():
                plt.axvline(x=row['timestamp'], color='black', linestyle='--', alpha=0.5)

            plt.title(f'{sensor.upper()} 传感器数据')
            plt.xlabel('时间')
            plt.ylabel('数值')
            plt.xticks(rotation=45)

        plt.tight_layout()
        plt.show()

        # Show the failure-type class distribution.
        plt.figure(figsize=(10, 6))
        failure_counts = device_data['failure_type'].value_counts().sort_index()
        class_names = ['正常-0', '故障-1', '故障-2', '故障-3']
        plt.bar(class_names, failure_counts.values)
        plt.title('故障类型分布')
        plt.ylabel('样本数量')
        for i, v in enumerate(failure_counts.values):
            plt.text(i, v + 0.5, str(v), ha='center', va='bottom')
        plt.show()

    # BUG FIX: declared without `self`; made a staticmethod (see above).
    @staticmethod
    def visualize_training_history(history):
        """Plot training/validation accuracy and loss curves from a Keras
        ``History`` object."""
        plt.figure(figsize=(12, 5))

        # Accuracy curves.
        plt.subplot(1, 2, 1)
        plt.plot(history.history['accuracy'], label='训练准确率')
        plt.plot(history.history['val_accuracy'], label='验证准确率')
        plt.title('模型准确率')
        plt.xlabel('Epoch')
        plt.ylabel('准确率')
        plt.legend()
        plt.grid(True)

        # Loss curves.
        plt.subplot(1, 2, 2)
        plt.plot(history.history['loss'], label='训练损失')
        plt.plot(history.history['val_loss'], label='验证损失')
        plt.title('模型损失')
        plt.xlabel('Epoch')
        plt.ylabel('损失')
        plt.legend()
        plt.grid(True)

        plt.tight_layout()
        plt.show()


def training_save_classification_lstm():
    """Generate mock data, train the LSTM classifier and persist the model
    together with its fitted scaler."""
    logger.debug("\ngenerating mock datas......")
    training_frame = generate_data(100000, 0.03)
    logger.debug("mock datas: %d\n%s", len(training_frame), training_frame.head(1))

    logger.debug("\nbuilding datasets for training...")
    scaler, X, y = pdm.build_datasets_classification_lstm(training_frame)
    logger.debug("datasets-X: %s", X.shape)
    logger.debug("datasets-y: %s", y.shape)

    logger.debug("\ntraining...")
    trained_model = pdm.training_classification_lstm(X, y, evaluate=True)

    logger.debug("\n saving...")
    persister_lstm.save_model_keras(trained_model)
    persister_lstm.save_scaler(scaler)


def load_predict_classification_lstm():
    """Load the persisted LSTM model and scaler, then spot-check predictions
    on ten random windows drawn from freshly generated mock data."""
    logger.debug("\nloading...")
    model = persister_lstm.load_model_keras()
    scaler = persister_lstm.load_scaler()

    # Build ad-hoc windows from fresh mock data for prediction.
    logger.debug("\npredicting...")
    mock_testing_datas = generate_data(1000, 0.08)
    test_features, test_labels = extract_data(mock_testing_datas, label_key, feature_keys)

    rows = len(test_features)
    # Window end indices start at window_size so every window is complete.
    random_idx = np.random.randint(window_size, rows, size=10)

    for idx in random_idx:
        window_features = window_data(test_features, idx, window_size)
        # Predict the class of this window.
        result = pdm.predict_classification_lstm(model, scaler, window_features)

        window_labels = window_data(test_labels, idx, window_size)
        window_label = extract_window_label(window_labels)

        # BUG FIX: the mark was inlined as {"✓" if ...} inside a double-quoted
        # f-string — same-quote nesting is a SyntaxError before Python 3.12.
        mark = "✓" if result['predicted_class'] == window_label else "✗"
        print(f"  实际/预测/结果: {label_alias[window_label]} / {result['predicted_class']} / {mark}")
        print("  预测概率:")
        for label, prob in result['probabilities'].items():
            print(f"    {label_alias[label]}: {prob:.4f}")


def training_save_classification_lightgbm():
    """Generate mock data, train the LightGBM classifier and persist it."""
    logger.debug("\ngenerating mock datas......")
    mock_training_datas = generate_data(100000, 0.03)
    logger.debug("mock datas: %d\n%s", len(mock_training_datas), mock_training_datas.head(1))

    logger.debug("\nbuilding datasets for training...")
    X, y = pdm.build_datasets_classification_lightgbm(mock_training_datas)
    logger.debug("datasets-X: %s", X.shape)
    logger.debug("datasets-y: %s", y.shape)

    logger.debug("\ntraining...")
    model = pdm.training_classification_lightgbm(X, y, evaluate=True)

    logger.debug("\n saving...")
    # BUG FIX: was save_model_keras, but the paired loader
    # (load_predict_classification_lightgbm) calls load_model_sklearn —
    # the sklearn-API LightGBM model must be saved with the matching saver.
    persister_lightgbm.save_model_sklearn(model)


def load_predict_classification_lightgbm():
    """Load the persisted LightGBM model, then spot-check predictions on ten
    random windows (converted to statistical features) from fresh mock data."""
    logger.debug("\nloading...")
    model = persister_lightgbm.load_model_sklearn()

    # Build ad-hoc windows from fresh mock data for prediction.
    logger.debug("\npredicting...")
    mock_testing_datas = generate_data(1000, 0.08)
    test_features, test_labels = extract_data(mock_testing_datas, label_key, feature_keys)

    rows = len(test_features)
    # Window end indices start at window_size so every window is complete.
    random_idx = np.random.randint(window_size, rows, size=10)

    for idx in random_idx:
        window_features = window_data(test_features, idx, window_size)
        # LightGBM consumes per-window statistical features, not raw rows.
        statistical_features = extract_statistical_features(window_features)
        result = pdm.predict_classification_lightgbm(model, statistical_features.to_numpy())

        window_labels = window_data(test_labels, idx, window_size)
        window_label = extract_window_label(window_labels)

        # BUG FIX: the mark was inlined as {"✓" if ...} inside a double-quoted
        # f-string — same-quote nesting is a SyntaxError before Python 3.12.
        mark = "✓" if result['predicted_class'] == window_label else "✗"
        print(f"  实际/预测/结果: {label_alias[window_label]} / {result['predicted_class']} / {mark}")
        print("  预测概率:")
        for label, prob in result['probabilities'].items():
            print(f"    {label_alias[label]}: {prob:.4f}")


if __name__ == "__main__":
    # Sensor columns used as model inputs.
    feature_keys = ('sensor_a', 'sensor_b', 'sensor_c', 'sensor_d')
    # Column holding the integer-coded failure class.
    label_key = 'failure_type'
    # Display names for the four classes, indexed by class id.
    label_alias = ('正常-0', '故障-1', '故障-2', '故障-3')
    # Time steps per training/prediction window.
    window_size = 20
    pdm = PdM(feature_keys, label_key, label_alias, window_size)

    # Persisters read/write model artifacts under the "models" directory,
    # one name prefix per model family.
    persister_lstm = ModelPersister.ModelPersister("models", "classification_lstm")
    persister_lightgbm = ModelPersister.ModelPersister("models", "classification_lightgbm")

    # Only the LightGBM load-and-predict flow runs by default; swap in the
    # training/LSTM entry points above as needed.
    load_predict_classification_lightgbm()
