import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (
    Input, Embedding, Dense, Concatenate, Dropout, Flatten, 
    BatchNormalization, LeakyReLU, Reshape
)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
import numpy as np
import logging
import os

from .attention_layers import AttentionLayer, MultiHeadAttention

class DeepHybridModel:
    """Deep-learning hybrid recommendation model (full version).

    Combines collaborative filtering (user/movie ID embeddings), content
    features, a deep MLP tower, and an optional attention mechanism over
    the fused feature sequence.

    NOTE(review): the movie content-feature branch is intentionally disabled
    (commented out throughout); the built model takes exactly three inputs:
    [user_id, movie_id, user_features]. Re-enable the commented lines in
    build_model() to restore the fourth input.
    """

    def __init__(self, n_users, n_movies, user_feature_dim, movie_feature_dim,
                 config=None):
        """Store dimensions and merge the user config over the defaults.

        Args:
            n_users: Embedding vocabulary size for user IDs. Assumes IDs are
                already indexed 0..n_users-1 — TODO confirm; pass n_users + 1
                here if raw 1-based IDs are fed to the model.
            n_movies: Embedding vocabulary size for movie IDs (same caveat).
            user_feature_dim: Width of the dense user-feature input vector.
            movie_feature_dim: Width of the movie-feature vector (currently
                unused — the movie-feature branch is disabled).
            config: Optional dict of overrides; shallow-merged key-by-key
                over the defaults from _get_default_config().
        """
        self.n_users = n_users
        self.n_movies = n_movies
        self.user_feature_dim = user_feature_dim
        self.movie_feature_dim = movie_feature_dim
        # Shallow merge: user-supplied keys win over the defaults.
        merged_config = self._get_default_config()
        if config is not None:
            merged_config.update(config)
        self.config = merged_config
        self.logger = logging.getLogger(__name__)
        self.model = None    # populated by build_model()
        self.history = None  # reserved for a fit() history set by the caller

    def _get_default_config(self):
        """Return the default hyperparameter configuration as a fresh dict."""
        return {
            'embedding_dim': 64,
            'hidden_layers': [256, 128, 64],
            'dropout_rates': [0.3, 0.3, 0.2],
            'learning_rate': 0.001,
            'batch_size': 1024,
            'epochs': 100,
            'use_attention': True,
            'use_batch_norm': True,
            'l2_reg': 0.0001,
            'activation': 'leaky_relu'
        }

    def _create_feature_encoder(self, input_layer, feature_dim, name_prefix):
        """Build a small dense encoder tower over a raw feature input.

        Args:
            input_layer: Keras tensor of shape (batch, feature_dim).
            feature_dim: Width of the incoming feature vector; used only to
                decide which encoder layers apply.
            name_prefix: Unique prefix for the created layer names.

        Returns:
            The encoded Keras tensor. If feature_dim <= 128 no encoder layer
            fires and the input passes through unchanged — presumably
            intentional for narrow feature vectors; verify against callers.
        """
        x = input_layer

        # Very wide inputs are first projected down to 512 dims.
        if feature_dim > 512:
            x = Dense(512, activation='relu', name=f'{name_prefix}_dense_1')(x)
            x = Dropout(0.2)(x)
            feature_dim = 512

        # Encoder layers — each is applied only if it actually reduces width.
        for i, units in enumerate([256, 128]):
            if units < feature_dim:
                x = Dense(units, activation='relu',
                          kernel_regularizer=l2(self.config['l2_reg']),
                          name=f'{name_prefix}_dense_{i+2}')(x)
                if self.config['use_batch_norm']:
                    x = BatchNormalization()(x)
                # Clamp the index so short dropout_rates lists still work.
                x = Dropout(self.config['dropout_rates'][min(i, len(self.config['dropout_rates'])-1)])(x)

        return x

    def build_model(self):
        """Build and compile the hybrid network; returns the Keras Model.

        Architecture: ID embeddings + encoded user features are projected to
        a common width, stacked into a length-3 sequence, optionally passed
        through multi-head attention, then fed to a deep MLP regressor that
        outputs a single linear rating.
        """
        self.logger.info("构建深度学习混合模型...")

        # 1. Input layers.
        user_id_input = Input(shape=(1,), name='user_id_input')
        movie_id_input = Input(shape=(1,), name='movie_id_input')
        user_features_input = Input(shape=(self.user_feature_dim,), name='user_feature_input')
        # movie_features_input = Input(shape=(self.movie_feature_dim,), name='movie_features_input')

        # 2. Embedding layers for the collaborative-filtering signal.
        user_embedding = Embedding(
            self.n_users,
            self.config['embedding_dim'],
            embeddings_regularizer=l2(self.config['l2_reg']),
            name='user_embedding'
        )(user_id_input)

        movie_embedding = Embedding(
            self.n_movies,
            self.config['embedding_dim'],
            embeddings_regularizer=l2(self.config['l2_reg']),
            name='movie_embedding'
        )(movie_id_input)

        # 3. Content-feature encoding.
        user_features_encoded = self._create_feature_encoder(
            user_features_input, self.user_feature_dim, 'user_features'
        )
        # movie_features_encoded = self._create_feature_encoder(
        #     movie_features_input, self.movie_feature_dim, 'movie_features'
        # )

        # 4. Flatten the (batch, 1, dim) embeddings to (batch, dim).
        user_embedding_flat = Flatten()(user_embedding)
        movie_embedding_flat = Flatten()(movie_embedding)

        # 5. Merge all features: project each to embedding_dim, expand_dims,
        #    and stack them into a (batch, 3, embedding_dim) sequence.
        #    Lambda is imported locally once for both branches below (the
        #    original re-imported it a second time inside the attention branch).
        from tensorflow.keras.layers import Lambda
        embedding_dim = self.config['embedding_dim']
        # Project user_features_encoded to embedding_dim with a unique name.
        user_features_proj = Dense(embedding_dim, activation='relu', name='user_features_proj_dense')(user_features_encoded)
        user_features_proj = Dropout(self.config['dropout_rates'][0], name='user_features_proj_dropout')(user_features_proj)
        # movie_features_proj = Dense(embedding_dim, activation='relu', name='movie_features_proj_dense')(movie_features_encoded)
        # movie_features_proj = Dropout(self.config['dropout_rates'][0], name='movie_features_proj_dropout')(movie_features_proj)

        user_embedding_exp = Lambda(lambda x: tf.expand_dims(x, axis=1), name='user_embedding_expand')(user_embedding_flat)  # (batch, 1, embedding_dim)
        movie_embedding_exp = Lambda(lambda x: tf.expand_dims(x, axis=1), name='movie_embedding_expand')(movie_embedding_flat)
        user_features_exp = Lambda(lambda x: tf.expand_dims(x, axis=1), name='user_features_expand')(user_features_proj)
        # movie_features_exp = Lambda(lambda x: tf.expand_dims(x, axis=1), name='movie_features_expand')(movie_features_proj)
        merged_sequence = Concatenate(axis=1, name='feature_sequence_concat')([
            user_embedding_exp,
            movie_embedding_exp,
            user_features_exp,
            # movie_features_exp
        ])  # (batch, 3, embedding_dim)

        # 6. Optional attention over the 3-token feature sequence.
        if self.config['use_attention']:
            # NOTE(review): the project-local MultiHeadAttention is assumed to
            # return (attended_sequence, attention_weights) — confirm against
            # attention_layers.py. The weights are not used here.
            attended, _ = MultiHeadAttention(
                num_heads=4,
                head_dim=32
            )(merged_sequence)
            # Global average pooling over the sequence axis.
            x = Lambda(lambda t: tf.reduce_mean(t, axis=1))(attended)
        else:
            # Without attention, just flatten the sequence to one vector.
            x = Flatten()(merged_sequence)

        # 7. Deep MLP tower.
        for i, units in enumerate(self.config['hidden_layers']):
            x = Dense(
                units,
                kernel_regularizer=l2(self.config['l2_reg']),
                name=f'main_dense_{i+1}'
            )(x)
            # Batch norm before the activation, as configured.
            if self.config['use_batch_norm']:
                x = BatchNormalization(name=f'main_batch_norm_{i+1}')(x)
            if self.config['activation'] == 'leaky_relu':
                x = LeakyReLU(alpha=0.1, name=f'main_leaky_relu_{i+1}')(x)
            else:
                x = tf.keras.layers.Activation('relu', name=f'main_relu_{i+1}')(x)
            # Clamp the index so short dropout_rates lists still work.
            x = Dropout(self.config['dropout_rates'][min(i, len(self.config['dropout_rates'])-1)],
                        name=f'main_dropout_{i+1}')(x)

        # 8. Linear output head — unbounded rating regression.
        output = Dense(1, activation='linear', name='rating_output')(x)

        # 9. Assemble the model (three inputs; see class NOTE).
        self.model = Model(
            inputs=[
                user_id_input,
                movie_id_input,
                user_features_input,
                # movie_features_input
            ],
            outputs=output,
            name='deep_hybrid_recommender'
        )

        # 10. Compile with Adam + MSE.
        self._compile_model()

        self.logger.info("模型构建完成")
        return self.model

    def _compile_model(self):
        """Compile self.model with Adam, MSE loss, and MAE/MSE metrics."""
        optimizer = Adam(
            learning_rate=self.config['learning_rate'],
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-7
        )

        self.model.compile(
            optimizer=optimizer,
            loss='mse',
            metrics=['mae', 'mse']
        )

    def get_callbacks(self, model_checkpoint_path=None, log_dir='./logs'):
        """Return the standard training callbacks.

        Args:
            model_checkpoint_path: If given, a best-weights-only
                ModelCheckpoint monitoring val_loss is appended.
            log_dir: TensorBoard log directory (defaults to the previously
                hard-coded './logs').

        Returns:
            A list of Keras callbacks: early stopping (patience 10, restores
            best weights), LR reduction on plateau, TensorBoard, and
            optionally the checkpoint.
        """
        callbacks = [
            EarlyStopping(
                monitor='val_loss',
                patience=10,
                restore_best_weights=True,
                verbose=1
            ),
            ReduceLROnPlateau(
                monitor='val_loss',
                factor=0.5,
                patience=5,
                min_lr=1e-7,
                verbose=1
            ),
            tf.keras.callbacks.TensorBoard(
                log_dir=log_dir,
                histogram_freq=1
            )
        ]

        if model_checkpoint_path:
            checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
                filepath=model_checkpoint_path,
                save_weights_only=True,
                monitor='val_loss',
                mode='min',
                save_best_only=True,
                verbose=1
            )
            callbacks.append(checkpoint_callback)

        return callbacks

    def summary(self):
        """Print the Keras model summary, or warn if not built yet."""
        if self.model:
            return self.model.summary()
        else:
            self.logger.warning("模型尚未构建")

    def save_model(self, filepath):
        """Save the full model to filepath (no-op with a warning if unbuilt)."""
        if self.model:
            self.model.save(filepath)
            self.logger.info(f"模型已保存到: {filepath}")
        else:
            self.logger.warning("没有模型可保存")

    def load_model(self, filepath):
        """Load a full model from filepath, registering the custom layers.

        Returns:
            True on success, False if the file does not exist.
        """
        if os.path.exists(filepath):
            self.model = tf.keras.models.load_model(
                filepath,
                custom_objects={
                    'AttentionLayer': AttentionLayer,
                    'MultiHeadAttention': MultiHeadAttention,
                    'LeakyReLU': LeakyReLU
                }
            )
            self.logger.info(f"模型已从 {filepath} 加载")
            return True
        else:
            self.logger.error(f"模型文件不存在: {filepath}")
            return False