import tensorflow as tf
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import (Dense, Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, 
                                   Flatten, Dropout, BatchNormalization, Activation,
                                   Input, LSTM, Bidirectional, GlobalAveragePooling1D,
                                   Attention, MultiHeadAttention, LayerNormalization)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from tensorflow.keras.utils import to_categorical

class DeepLearningModels:
    """Factory for 1D sequence classifiers (CNN, ResNet, Transformer, hybrid).

    Each ``build_*`` method returns an *uncompiled* Keras model that takes
    inputs of shape ``input_shape`` and emits a softmax over ``num_classes``
    classes.
    """

    def __init__(self, input_shape, num_classes):
        # input_shape: shape of a single sample, e.g. (timesteps, channels).
        self.input_shape = input_shape
        # num_classes: width of the final softmax layer.
        self.num_classes = num_classes

    def build_cnn_model(self):
        """Build a plain 1D-CNN classifier (3 conv stages + GAP head)."""
        model = Sequential([
            Conv1D(64, 3, activation='relu', input_shape=self.input_shape),
            BatchNormalization(),
            MaxPooling1D(2),
            Conv1D(128, 3, activation='relu'),
            BatchNormalization(),
            MaxPooling1D(2),
            Conv1D(256, 3, activation='relu'),
            BatchNormalization(),
            GlobalAveragePooling1D(),
            Dropout(0.5),
            Dense(128, activation='relu'),
            Dropout(0.3),
            Dense(self.num_classes, activation='softmax')
        ])
        return model

    def build_resnet_model(self):
        """Build a 1D ResNet-style classifier.

        Bug fix vs. the original: the residual block added the raw shortcut
        to the conv output, which raises a shape-mismatch error as soon as
        the stage filter count changes (64 -> 128 -> 256 in the loop below).
        The shortcut is now projected with a 1x1 convolution whenever its
        channel count differs from ``filters``.
        """
        def residual_block(x, filters, kernel_size=3):
            shortcut = x
            x = Conv1D(filters, kernel_size, padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv1D(filters, kernel_size, padding='same')(x)
            x = BatchNormalization()(x)
            # Project the identity path when channel counts differ so the
            # element-wise add is shape-compatible.
            if shortcut.shape[-1] != filters:
                shortcut = Conv1D(filters, 1, padding='same')(shortcut)
                shortcut = BatchNormalization()(shortcut)
            x = tf.keras.layers.add([x, shortcut])
            x = Activation('relu')(x)
            return x

        inputs = Input(shape=self.input_shape)
        x = Conv1D(64, 7, padding='same')(inputs)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = MaxPooling1D(3)(x)

        # Three stages of two residual blocks each, downsampling between stages.
        for filters in [64, 128, 256]:
            for _ in range(2):
                x = residual_block(x, filters)
            x = MaxPooling1D(2)(x)

        x = GlobalAveragePooling1D()(x)
        x = Dense(256, activation='relu')(x)
        x = Dropout(0.5)(x)
        outputs = Dense(self.num_classes, activation='softmax')(x)

        return Model(inputs, outputs)

    def build_transformer_model(self):
        """Build a Transformer-encoder classifier (2 stacked encoder blocks)."""
        def transformer_encoder(inputs, head_size, num_heads, ff_dim, dropout=0):
            # Self-attention sub-layer with a post-norm residual connection.
            x = MultiHeadAttention(key_dim=head_size, num_heads=num_heads, dropout=dropout)(inputs, inputs)
            x = Dropout(dropout)(x)
            x = LayerNormalization(epsilon=1e-6)(x + inputs)

            # Position-wise feed-forward sub-layer; the second Dense projects
            # back to the model width so the residual add is shape-compatible.
            y = Dense(ff_dim, activation="relu")(x)
            y = Dense(inputs.shape[-1])(y)
            y = Dropout(dropout)(y)
            y = LayerNormalization(epsilon=1e-6)(x + y)
            return y

        inputs = Input(shape=self.input_shape)
        x = inputs

        for _ in range(2):
            x = transformer_encoder(x, head_size=256, num_heads=4, ff_dim=512, dropout=0.1)

        x = GlobalAveragePooling1D()(x)
        x = Dropout(0.5)(x)
        outputs = Dense(self.num_classes, activation='softmax')(x)

        return Model(inputs, outputs)

    def build_hybrid_model(self):
        """Build a CNN front-end followed by a self-attention stage."""
        inputs = Input(shape=self.input_shape)

        # CNN feature extractor.
        x = Conv1D(64, 3, activation='relu')(inputs)
        x = BatchNormalization()(x)
        x = MaxPooling1D(2)(x)
        x = Conv1D(128, 3, activation='relu')(x)
        x = BatchNormalization()(x)
        x = MaxPooling1D(2)(x)

        # Self-attention over the CNN feature sequence.
        x = MultiHeadAttention(num_heads=4, key_dim=64)(x, x)
        x = LayerNormalization(epsilon=1e-6)(x)
        x = Dropout(0.1)(x)

        x = GlobalAveragePooling1D()(x)
        x = Dense(256, activation='relu')(x)
        x = Dropout(0.5)(x)
        outputs = Dense(self.num_classes, activation='softmax')(x)

        return Model(inputs, outputs)

# Model training class
class ModelTrainer:
    """Compile, fit, evaluate and visualize a Keras classification model."""

    def __init__(self, model, model_name):
        # model: a built (not yet compiled) Keras model.
        self.model = model
        # model_name: used to name the on-disk checkpoint file.
        self.model_name = model_name
        # Populated by train_model(); None until training has run.
        self.history = None

    def compile_model(self, learning_rate=0.001):
        """Compile with Adam + categorical cross-entropy.

        Assumes labels are one-hot encoded (see to_categorical in the driver).
        """
        self.model.compile(
            optimizer=Adam(learning_rate=learning_rate),
            loss='categorical_crossentropy',
            metrics=['accuracy', 'precision', 'recall']
        )

    def train_model(self, X_train, y_train, X_val, y_val, epochs=100, batch_size=32):
        """Fit the model with early stopping, LR reduction and checkpointing.

        The best weights (by val_loss) are restored at the end of training and
        also written to 'best_<model_name>.h5'.
        """
        callbacks = [
            EarlyStopping(monitor='val_loss', patience=15, restore_best_weights=True),
            ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=1e-7),
            ModelCheckpoint(f'best_{self.model_name}.h5', save_best_only=True)
        ]

        self.history = self.model.fit(
            X_train, y_train,
            validation_data=(X_val, y_val),
            epochs=epochs,
            batch_size=batch_size,
            callbacks=callbacks,
            verbose=1
        )

    def evaluate_model(self, X_test, y_test):
        """Return a {metric_name: value} dict from model.evaluate()."""
        results = self.model.evaluate(X_test, y_test, verbose=0)
        return dict(zip(self.model.metrics_names, results))

    def plot_training_history(self):
        """Plot accuracy and loss curves for the most recent training run.

        Raises:
            RuntimeError: if called before train_model().

        NOTE(review): relies on `plt` (matplotlib.pyplot) being imported
        elsewhere — this file does not import it; confirm upstream.
        """
        # Fail with a clear message instead of an AttributeError on None.
        if self.history is None:
            raise RuntimeError("train_model() must be called before plotting history")

        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))

        ax1.plot(self.history.history['accuracy'], label='Training Accuracy')
        ax1.plot(self.history.history['val_accuracy'], label='Validation Accuracy')
        ax1.set_title('Model Accuracy')
        ax1.set_xlabel('Epoch')
        ax1.set_ylabel('Accuracy')
        ax1.legend()

        ax2.plot(self.history.history['loss'], label='Training Loss')
        ax2.plot(self.history.history['val_loss'], label='Validation Loss')
        ax2.set_title('Model Loss')
        ax2.set_xlabel('Epoch')
        ax2.set_ylabel('Loss')
        ax2.legend()

        plt.show()

# Example usage
# Prepare the data.
# NOTE(review): X_selected, y, np and train_test_split are expected to be
# defined/imported in an earlier chunk of this file — confirm upstream.
X_train, X_test, y_train, y_test = train_test_split(
    X_selected.values, y, test_size=0.2, random_state=42, stratify=y
)
# Carve a validation set out of the training data instead of validating on
# the test set: the original code early-stopped, scheduled the LR, and
# checkpointed against X_test and then evaluated on that same set, which
# leaks test information and inflates the reported accuracy.
X_train, X_val, y_train, y_val = train_test_split(
    X_train, y_train, test_size=0.2, random_state=42, stratify=y_train
)
y_train_cat = to_categorical(y_train)
y_val_cat = to_categorical(y_val)
y_test_cat = to_categorical(y_test)

# Build and train the model; Conv1D expects (timesteps, channels) inputs,
# so each feature vector is treated as a length-n_features single-channel sequence.
input_shape = (X_train.shape[1], 1)
num_classes = len(np.unique(y))

model_builder = DeepLearningModels(input_shape, num_classes)
model = model_builder.build_hybrid_model()

trainer = ModelTrainer(model, 'hybrid_model')
trainer.compile_model(learning_rate=0.001)
trainer.train_model(X_train.reshape(-1, X_train.shape[1], 1), y_train_cat,
                    X_val.reshape(-1, X_val.shape[1], 1), y_val_cat,
                    epochs=100, batch_size=32)

# Evaluate on the held-out test set (never seen during training/model selection).
metrics = trainer.evaluate_model(X_test.reshape(-1, X_test.shape[1], 1), y_test_cat)
print(f"Test Accuracy: {metrics['accuracy']:.4f}")
