import torch
import torch.nn as nn

class LSTMModel(nn.Module):
    """Plain LSTM encoder followed by a single linear head.

    Expects input of shape (batch_size, sequence_length, input_size) and
    returns (batch_size, output_size).
    """

    def __init__(self, input_size, hidden_size, num_layers, output_size, dropout=0.2, bidirectional=False):
        super(LSTMModel, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size
        self.dropout = dropout
        self.bidirectional = bidirectional

        # LSTM layer
        self.lstm = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
            # nn.LSTM warns if dropout is set with a single layer, since
            # inter-layer dropout then has nothing to apply to.
            dropout=dropout if num_layers > 1 else 0,
            bidirectional=bidirectional
        )

        # Fully connected head; a bidirectional LSTM doubles the feature width.
        direction_multiplier = 2 if bidirectional else 1
        self.fc = nn.Linear(hidden_size * direction_multiplier, output_size)

        # Dropout applied between the LSTM summary and the linear head.
        self.dropout_layer = nn.Dropout(dropout)

    def forward(self, x):
        # x shape: (batch_size, sequence_length, input_size)

        # hidden shape: (num_layers * num_directions, batch, hidden_size)
        lstm_out, (hidden, cell) = self.lstm(x)

        if self.bidirectional:
            # Concatenate the top layer's final forward state (hidden[-2])
            # and final backward state (hidden[-1]).  Taking
            # lstm_out[:, -1, :] here would be wrong: at the last timestep
            # the backward direction has only seen a single input token.
            summary = torch.cat((hidden[-2], hidden[-1]), dim=1)
        else:
            # Last timestep's output (equal to the top layer's final hidden state).
            summary = lstm_out[:, -1, :]

        # Dropout
        summary = self.dropout_layer(summary)

        # Fully connected head
        output = self.fc(summary)

        return output

    def get_config(self):
        """Return the constructor arguments as a dict."""
        return {
            'input_size': self.input_size,
            'hidden_size': self.hidden_size,
            'num_layers': self.num_layers,
            'output_size': self.output_size,
            'dropout': self.dropout,
            'bidirectional': self.bidirectional
        }

class LSTMModelWithAttention(nn.Module):
    """LSTM encoder with additive attention pooling over the sequence.

    A learned scalar score per timestep is softmax-normalized along the
    sequence axis; the per-step LSTM outputs are averaged under those
    weights before the linear head.  Input shape is
    (batch_size, sequence_length, input_size); output shape is
    (batch_size, output_size).
    """

    def __init__(self, input_size, hidden_size, num_layers, output_size, dropout=0.2, bidirectional=False):
        super(LSTMModelWithAttention, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size
        self.dropout = dropout
        self.bidirectional = bidirectional

        # Recurrent encoder (inter-layer dropout only makes sense with >1 layer).
        self.lstm = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
            dropout=dropout if num_layers > 1 else 0,
            bidirectional=bidirectional,
        )

        # A bidirectional encoder doubles the per-step feature width.
        num_directions = 2 if bidirectional else 1
        feature_dim = hidden_size * num_directions

        # One scalar attention score per timestep.
        self.attention = nn.Linear(feature_dim, 1)

        # Output head and regularization.
        self.fc = nn.Linear(feature_dim, output_size)
        self.dropout_layer = nn.Dropout(dropout)

    def forward(self, x):
        # x: (batch_size, sequence_length, input_size)
        encoded, _ = self.lstm(x)

        # scores -> weights: softmax over the sequence (time) dimension.
        scores = self.attention(encoded)
        weights = torch.softmax(scores, dim=1)

        # Attention-weighted average of the per-step encodings.
        pooled = torch.sum(weights * encoded, dim=1)

        # Regularize, then project to the output size.
        pooled = self.dropout_layer(pooled)
        return self.fc(pooled)

    def get_config(self):
        """Return the constructor arguments as a dict."""
        return dict(
            input_size=self.input_size,
            hidden_size=self.hidden_size,
            num_layers=self.num_layers,
            output_size=self.output_size,
            dropout=self.dropout,
            bidirectional=self.bidirectional,
        )

def create_model(model_type='basic', **kwargs):
    """Factory: build a model by type key.

    Supported keys: 'basic' (LSTMModel) and 'attention'
    (LSTMModelWithAttention).  Remaining keyword arguments are passed to
    the chosen class's constructor.  Raises ValueError on an unknown key.
    """
    registry = {
        'basic': LSTMModel,
        'attention': LSTMModelWithAttention,
    }
    model_cls = registry.get(model_type)
    if model_cls is None:
        raise ValueError(f"未知的模型类型: {model_type}")
    return model_cls(**kwargs)