import os
import json
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import time
import random
import math
from sklearn.utils import class_weight
import shutil

# Pin every RNG we use so runs are reproducible
SEED = 42
random.seed(SEED)
np.random.seed(SEED)
tf.random.set_seed(SEED)


class HighAccuracyModel(tf.keras.Model):
    """CNN + BiLSTM + attention classifier for fixed-length sequence data.

    Architecture: three Conv1D/BatchNorm/MaxPool/Dropout stages, two
    bidirectional LSTM stages with layer normalization, self-attention,
    global average+max pooling fusion, and a three-layer dense head with
    a softmax output over ``num_classes``.
    """

    def __init__(self, input_shape, num_classes):
        """Create all sublayers and build the model for ``input_shape``.

        Args:
            input_shape: per-sample shape, e.g. (timesteps, features) — batch
                dimension excluded.
            num_classes: number of output classes.
        """
        super(HighAccuracyModel, self).__init__()
        self.num_classes = num_classes
        self.input_shape_val = input_shape  # stored so callers can rebuild/save later

        # Convolutional feature extractor
        self.conv1 = layers.Conv1D(64, 5, activation='relu', padding='same')
        self.bn1 = layers.BatchNormalization()
        self.pool1 = layers.MaxPooling1D(2)
        self.drop1 = layers.Dropout(0.3)

        self.conv2 = layers.Conv1D(128, 5, activation='relu', padding='same')
        self.bn2 = layers.BatchNormalization()
        self.pool2 = layers.MaxPooling1D(2)
        self.drop2 = layers.Dropout(0.3)

        self.conv3 = layers.Conv1D(256, 5, activation='relu', padding='same')
        self.bn3 = layers.BatchNormalization()
        self.pool3 = layers.MaxPooling1D(2)
        self.drop3 = layers.Dropout(0.4)

        # Recurrent (BiLSTM) stages
        self.lstm1 = layers.Bidirectional(layers.LSTM(256, return_sequences=True))
        self.ln1 = layers.LayerNormalization()
        self.drop4 = layers.Dropout(0.4)

        self.lstm2 = layers.Bidirectional(layers.LSTM(256, return_sequences=True))
        self.ln2 = layers.LayerNormalization()
        self.drop5 = layers.Dropout(0.4)

        # Self-attention over the LSTM output sequence
        self.attention = layers.Attention()

        # Feature fusion
        self.concat = layers.Concatenate()
        self.gap = layers.GlobalAveragePooling1D()
        self.gmp = layers.GlobalMaxPooling1D()
        self.concat2 = layers.Concatenate()

        # Dense classification head
        self.dense1 = layers.Dense(512, activation='relu')
        self.bn4 = layers.BatchNormalization()
        self.drop6 = layers.Dropout(0.5)

        self.dense2 = layers.Dense(256, activation='relu')
        self.bn5 = layers.BatchNormalization()
        self.drop7 = layers.Dropout(0.5)

        self.dense3 = layers.Dense(128, activation='relu')
        self.bn6 = layers.BatchNormalization()
        self.drop8 = layers.Dropout(0.4)

        # Output layer
        self.output_layer = layers.Dense(num_classes, activation='softmax')

        # Build eagerly so get_weights()/set_weights() work before the first fit
        self.build((None, *input_shape))

    def build(self, input_shape):
        """Build every sublayer by tracing one forward pass on a dummy batch.

        The previous implementation force-set ``layer.built = True`` without
        ever creating the layers' weights, which left the model weightless
        until (and sometimes during) the first real call. Running an actual
        forward pass lets each layer create its variables correctly.
        """
        if self.built:
            return  # already built; external callers may invoke build() repeatedly
        dummy = tf.zeros((1, *input_shape[1:]), dtype=tf.float32)
        self.call(dummy)
        super(HighAccuracyModel, self).build(input_shape)

    def call(self, inputs, training=None):
        """Forward pass.

        Args:
            inputs: float tensor of shape (batch, timesteps, features).
            training: explicit train/inference flag, forwarded to the
                BatchNorm/Dropout layers (backward-compatible default None
                lets Keras resolve the mode from context).

        Returns:
            softmax probabilities of shape (batch, num_classes).
        """
        # Convolutional path
        x = self.conv1(inputs)
        x = self.bn1(x, training=training)
        x = self.pool1(x)
        x = self.drop1(x, training=training)

        x = self.conv2(x)
        x = self.bn2(x, training=training)
        x = self.pool2(x)
        x = self.drop2(x, training=training)

        x = self.conv3(x)
        x = self.bn3(x, training=training)
        x = self.pool3(x)
        x = self.drop3(x, training=training)

        # Recurrent path
        y = self.lstm1(x)
        y = self.ln1(y)
        y = self.drop4(y, training=training)

        y = self.lstm2(y)
        y = self.ln2(y)
        y = self.drop5(y, training=training)

        # Self-attention (query = value = LSTM output)
        att = self.attention([y, y])

        # Fuse the attended and raw sequence features
        combined = self.concat([y, att])

        # Global pooled representation
        gap = self.gap(combined)
        gmp = self.gmp(combined)
        global_features = self.concat2([gap, gmp])

        # Dense head
        z = self.dense1(global_features)
        z = self.bn4(z, training=training)
        z = self.drop6(z, training=training)

        z = self.dense2(z)
        z = self.bn5(z, training=training)
        z = self.drop7(z, training=training)

        z = self.dense3(z)
        z = self.bn6(z, training=training)
        z = self.drop8(z, training=training)

        # Class probabilities
        return self.output_layer(z)


class AdaptiveLearningRate(tf.keras.callbacks.Callback):
    """Halve the learning rate when validation accuracy stops improving.

    Unlike ReduceLROnPlateau, "improvement" here means beating the best
    accuracy seen so far by at least ``improvement_threshold``.
    """

    def __init__(self, factor=0.5, patience=5, min_lr=1e-6, improvement_threshold=0.001):
        """
        Args:
            factor: multiplicative decay applied to the learning rate.
            patience: epochs without sufficient improvement before decaying.
            min_lr: lower bound for the learning rate.
            improvement_threshold: minimum val_accuracy gain that counts
                as an improvement.
        """
        super(AdaptiveLearningRate, self).__init__()
        self.factor = factor
        self.patience = patience
        self.min_lr = min_lr
        self.improvement_threshold = improvement_threshold
        self.wait = 0
        self.best_val_acc = 0

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        current_val_acc = logs.get('val_accuracy')
        if current_val_acc is None:
            # No validation metrics this epoch (e.g. no validation data);
            # the old code crashed here comparing None to a float.
            return
        current_lr = tf.keras.backend.get_value(self.model.optimizer.lr)

        if current_val_acc > self.best_val_acc + self.improvement_threshold:
            self.best_val_acc = current_val_acc
            self.wait = 0
        else:
            self.wait += 1
            if self.wait >= self.patience:
                new_lr = max(current_lr * self.factor, self.min_lr)
                tf.keras.backend.set_value(self.model.optimizer.lr, new_lr)
                print(f"\nEpoch {epoch + 1}: Reducing learning rate to {new_lr:.6f}")
                self.wait = 0


class HighAccuracyStopping(tf.keras.callbacks.Callback):
    """Stop training after val_accuracy stays at/above a target for N epochs.

    Also tracks the best weights seen so far and restores them when
    training is stopped by this callback.
    """

    def __init__(self, target_accuracy=0.99, patience=30):
        """
        Args:
            target_accuracy: val_accuracy level that must be sustained.
            patience: consecutive qualifying epochs required before stopping.
        """
        super(HighAccuracyStopping, self).__init__()
        self.target_accuracy = target_accuracy
        self.patience = patience
        self.wait = 0
        self.stopped_epoch = 0
        self.best_weights = None
        # Best val_accuracy seen so far. The old code stored this in the
        # per-epoch `logs` dict, which Keras recreates every epoch, so the
        # "best" comparison was effectively against 0 and the weights were
        # overwritten every epoch.
        self.best_val_acc = float('-inf')

    def on_train_begin(self, logs=None):
        self.wait = 0
        self.best_weights = None
        self.best_val_acc = float('-inf')

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        current_val_acc = logs.get('val_accuracy')
        if current_val_acc is None:
            return  # no validation metrics this epoch

        # Record the best weights first, so a restore in this same epoch
        # uses the most up-to-date snapshot.
        if self.best_weights is None or current_val_acc > self.best_val_acc:
            self.best_val_acc = current_val_acc
            self.best_weights = self.model.get_weights()

        if current_val_acc >= self.target_accuracy:
            self.wait += 1
            if self.wait >= self.patience:
                self.stopped_epoch = epoch
                self.model.stop_training = True
                print(
                    f"\nReached target accuracy of {self.target_accuracy} for {self.patience} consecutive epochs. Stopping training.")
                if self.best_weights is not None:
                    self.model.set_weights(self.best_weights)
        else:
            self.wait = 0


def load_data(data_path="preprocessed_data"):
    """Load preprocessed trajectories, labels, and the label map from disk.

    Args:
        data_path: directory containing trajectories.npy, labels.npy and
            label_map.json.

    Returns:
        (trajectories array, labels array, label-map object from JSON).
    """
    trajectories = np.load(os.path.join(data_path, "trajectories.npy"))
    labels = np.load(os.path.join(data_path, "labels.npy"))

    label_map_file = os.path.join(data_path, "label_map.json")
    with open(label_map_file, 'r') as handle:
        label_map = json.load(handle)

    return trajectories, labels, label_map


def create_ensemble_models(input_shape, num_classes, n_models=3):
    """Build and compile ``n_models`` identically-configured classifiers.

    Args:
        input_shape: per-sample input shape (batch dimension excluded).
        num_classes: number of output classes.
        n_models: ensemble size.

    Returns:
        list of compiled HighAccuracyModel instances.
    """
    # Named `ensemble` rather than `models` to avoid shadowing the
    # tensorflow.keras `models` import.
    ensemble = []
    for _ in range(n_models):
        member = HighAccuracyModel(input_shape, num_classes)
        member.compile(
            optimizer=tf.keras.optimizers.Adam(learning_rate=0.0005),
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy'],
        )
        ensemble.append(member)
    return ensemble


def train_ensemble(models, X_train, y_train, X_val, y_val, epochs=100, batch_size=64):
    """Fit each ensemble member in turn and collect its Keras History.

    Each member gets its own checkpoint directory under models/, an
    adaptive LR schedule, target-accuracy stopping, and early stopping.

    Returns:
        list of History objects, one per member.
    """
    histories = []

    # Weight the loss inversely to class frequency to counter imbalance.
    balanced = class_weight.compute_class_weight(
        'balanced',
        classes=np.unique(y_train),
        y=y_train,
    )
    class_weights = {idx: w for idx, w in enumerate(balanced)}

    for i, model in enumerate(models):
        print(f"\nTraining model {i + 1}/{len(models)}")

        # Per-member checkpoint directory
        model_dir = f'models/model_{i + 1}'
        os.makedirs(model_dir, exist_ok=True)

        checkpoint = tf.keras.callbacks.ModelCheckpoint(
            filepath=os.path.join(model_dir, 'best_model'),
            save_best_only=True,
            monitor='val_accuracy',
            mode='max',
            verbose=1,
            save_format='tf',  # TensorFlow SavedModel format
        )
        lr_schedule = AdaptiveLearningRate(
            factor=0.5,
            patience=8,
            min_lr=1e-7,
            improvement_threshold=0.0005,
        )
        target_stop = HighAccuracyStopping(
            target_accuracy=0.99,
            patience=30,
        )
        early_stop = tf.keras.callbacks.EarlyStopping(
            monitor='val_accuracy',
            patience=15,
            verbose=1,
            restore_best_weights=True,
        )

        # Build explicitly so the weights exist before fitting.
        model.build((None, *X_train.shape[1:]))

        run = model.fit(
            X_train, y_train,
            validation_data=(X_val, y_val),
            epochs=epochs,
            batch_size=batch_size,
            callbacks=[checkpoint, lr_schedule, target_stop, early_stop],
            verbose=1,
            class_weight=class_weights,
        )
        histories.append(run)

    return histories


def evaluate_ensemble(models, X_test, y_test):
    """Average the members' softmax outputs and report top-1 accuracy.

    Args:
        models: list of trained models exposing ``num_classes``, ``build``
            and ``predict``.
        X_test: test inputs.
        y_test: integer test labels.

    Returns:
        mean top-1 accuracy of the averaged ensemble prediction.
    """
    summed = np.zeros((len(X_test), models[0].num_classes))

    for member in models:
        member.build((None, *X_test.shape[1:]))  # make sure weights exist
        summed = summed + member.predict(X_test, verbose=0)

    averaged = summed / len(models)
    predicted = averaged.argmax(axis=1)
    accuracy = np.mean(predicted == y_test)

    print(f"\nEnsemble Accuracy: {accuracy:.4f}")
    return accuracy


def save_final_ensemble(models, model_path="models/ensemble_model", label_map=None):
    """Average the members' weights into one model, save it, and persist the label map.

    Args:
        models: trained HighAccuracyModel instances with identical architectures.
        model_path: destination directory for the SavedModel.
        label_map: optional {index: class-name} mapping (or any
            JSON-serializable map).

    Returns:
        the string-keyed label map when one was given, otherwise None.
    """
    # Shape/class info recorded on the models at construction time.
    input_shape = models[0].input_shape_val
    num_classes = models[0].num_classes

    print(f"Saving final ensemble with input shape: {input_shape}, num_classes: {num_classes}")

    # Fresh model with the same architecture to receive the averaged weights.
    ensemble_model = HighAccuracyModel(input_shape, num_classes)
    ensemble_model.compile(
        optimizer='adam',
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy']
    )
    # Build before assigning weights so the variables exist.
    ensemble_model.build((None, *input_shape))

    # Element-wise mean of each weight tensor across the members.
    # The previous nested-zip implementation iterated each tensor's first
    # axis and broke on scalar (0-d) weight arrays; np.mean over the
    # per-layer group is both correct and simpler.
    all_weights = [model.get_weights() for model in models]
    avg_weights = [np.mean(tensors, axis=0) for tensors in zip(*all_weights)]
    ensemble_model.set_weights(avg_weights)

    # Persist as a TensorFlow SavedModel.
    os.makedirs(os.path.dirname(model_path), exist_ok=True)
    ensemble_model.save(model_path, save_format='tf')
    print(f"Ensemble model saved to {model_path}")

    if label_map is None:
        return None

    # Normalize to string keys so the map round-trips through JSON cleanly.
    if isinstance(label_map, dict) and all(isinstance(k, int) for k in label_map.keys()):
        label_map_str = {str(k): v for k, v in label_map.items()}
    else:
        label_map_str = label_map

    label_map_path = os.path.join(model_path, "label_map.json")
    with open(label_map_path, 'w') as f:
        json.dump(label_map_str, f, indent=2)
    print(f"Label map saved to {label_map_path}")

    # Also keep a copy next to the per-member model directories.
    main_label_map_path = os.path.join("models", "label_map.json")
    with open(main_label_map_path, 'w') as f:
        json.dump(label_map_str, f, indent=2)
    print(f"Label map also saved to {main_label_map_path}")

    return label_map_str


def plot_history(histories):
    """Plot validation accuracy/loss curves for every member and save the figure.

    The chart is written to models/ensemble_training_history.png and then
    shown on screen.
    """
    plt.figure(figsize=(14, 10))

    # Top panel: validation accuracy per model.
    plt.subplot(2, 1, 1)
    for idx, run in enumerate(histories):
        plt.plot(run.history['val_accuracy'], label=f'Model {idx + 1} Val Accuracy')
    plt.title('Validation Accuracy of Ensemble Models', fontsize=14)
    plt.ylabel('Accuracy', fontsize=12)
    plt.xlabel('Epoch', fontsize=12)
    plt.legend(loc='lower right', fontsize=10)
    plt.grid(True, linestyle='--', alpha=0.7)
    plt.ylim([0.8, 1.0])

    # Bottom panel: validation loss per model.
    plt.subplot(2, 1, 2)
    for idx, run in enumerate(histories):
        plt.plot(run.history['val_loss'], label=f'Model {idx + 1} Val Loss')
    plt.title('Validation Loss of Ensemble Models', fontsize=14)
    plt.ylabel('Loss', fontsize=12)
    plt.xlabel('Epoch', fontsize=12)
    plt.legend(loc='upper right', fontsize=10)
    plt.grid(True, linestyle='--', alpha=0.7)

    plt.tight_layout()

    # Persist the figure, then display it.
    plt.savefig('models/ensemble_training_history.png', dpi=300, bbox_inches='tight')
    plt.show()


def train_high_accuracy_model():
    """End-to-end pipeline: load data, train the ensemble, evaluate, save, report."""
    # Load preprocessed data
    X, y, label_map = load_data()

    # Normalize the label map to {int: name}. JSON decoding yields string
    # keys; the map may also arrive as a plain list of names. The isinstance
    # guard prevents an AttributeError if keys are ever already ints.
    if isinstance(label_map, dict) and all(
        isinstance(k, str) and k.isdigit() for k in label_map.keys()
    ):
        label_map = {int(k): v for k, v in label_map.items()}
    elif isinstance(label_map, list):
        label_map = {i: name for i, name in enumerate(label_map)}

    # Sanity-check the dataset
    print(f"Dataset shape: {X.shape}")
    print(f"Labels shape: {y.shape}")
    print(f"Number of classes: {len(set(y))}")
    print(f"Class distribution: {np.bincount(y)}")

    # Stratified train/validation split
    X_train, X_val, y_train, y_val = train_test_split(
        X, y, test_size=0.15, random_state=SEED, stratify=y
    )

    print(f"Training samples: {X_train.shape[0]}, Validation samples: {X_val.shape[0]}")
    print(f"Input shape: {X_train.shape[1:]}")

    # Build the ensemble
    models = create_ensemble_models(X_train.shape[1:], len(set(y)), n_models=3)

    # Start from a clean output directory
    if os.path.exists('models'):
        shutil.rmtree('models')
    os.makedirs('models', exist_ok=True)

    # Train all members
    start_time = time.time()
    histories = train_ensemble(models, X_train, y_train, X_val, y_val, epochs=100)
    training_time = time.time() - start_time
    print(f"Training completed! Time: {training_time / 3600:.2f} hours")

    # Evaluate the averaged ensemble on the validation split
    ensemble_accuracy = evaluate_ensemble(models, X_val, y_val)

    # Save the merged model plus the label map
    saved_label_map = save_final_ensemble(models, label_map=label_map)
    print("Ensemble model and label map saved")

    # Training-history charts
    plot_history(histories)

    # JSON training report
    save_training_report(histories, ensemble_accuracy, training_time, saved_label_map)


def save_training_report(histories, ensemble_accuracy, training_time, label_map):
    """Write a JSON summary of the run to models/training_report.json.

    Args:
        histories: Keras History objects, one per ensemble member.
        ensemble_accuracy: accuracy of the averaged ensemble.
        training_time: total wall-clock training time in seconds.
        label_map: the (string-keyed) label map to embed in the report.
    """
    report = {
        "training_time_hours": training_time / 3600,
        "final_ensemble_accuracy": float(ensemble_accuracy),
        "label_map": label_map,
        "model_histories": []
    }

    for i, history in enumerate(histories):
        # Cast metrics to plain float: Keras histories can contain numpy
        # scalars, which json.dump cannot serialize.
        model_report = {
            "model": i + 1,
            "final_val_accuracy": float(history.history['val_accuracy'][-1]),
            "min_val_loss": float(min(history.history['val_loss'])),
            "epochs": len(history.history['val_accuracy'])
        }
        report["model_histories"].append(model_report)

    report_path = 'models/training_report.json'
    with open(report_path, 'w') as f:
        json.dump(report, f, indent=2)

    print(f"Training report saved to {report_path}")


# Script entry point: report environment info, then run the full pipeline.
if __name__ == "__main__":
    # Print TensorFlow version information
    print(f"TensorFlow version: {tf.__version__}")
    print(f"Running with seed: {SEED}")

    # Train the model ensemble
    train_high_accuracy_model()