import torch
from torch import nn

def try_get_model(pth):
    try:
        pth = torch.load(pth)
    except:
        pth = torch.load(pth, weights_only=False)
    # 如果是model，直接返回
    if isinstance(pth, nn.Module):
        return pth
    try:
        model = LSTM_v1()
        model.load_state_dict(pth)
        return model
    except:
        pass
    try:
        model = LSTM_v2()
        model.load_state_dict(pth)
        return model
    except:
        pass
    try:
        model = LSTM_v3()
        model.load_state_dict(pth)
        return model
    except:
        pass
    try:
        model = LSTM_v4()
        model.load_state_dict(pth)
        return model
    except:
        pass
    try:
        model = LSTM_v5()
        model.load_state_dict(pth)
        return model
    except:
        pass
    try:
        model = LSTM_v6()
        model.load_state_dict(pth)
        return model
    except:
        pass
    print(f"无法加载模型: {pth}")
    return None

# 保存不同版本的模型结构，用于比较和参考

# ===========================================================================
# 版本1: 基础版LSTM模型
# ===========================================================================
class LSTM_v1(nn.Module):
    """Version 1: baseline model.

    A single-layer LSTM (32 hidden units) over 5 input features
    [x, y, theta, lstim, rstim], with one linear head predicting
    [x, y, theta] from the final time step.
    """

    def __init__(self):
        super().__init__()
        hidden = 32
        # 5 input features, 3 output targets, batch-first layout.
        self.lstm = nn.LSTM(
            input_size=5,
            hidden_size=hidden,
            num_layers=1,
            batch_first=True,
        )
        self.linear = nn.Linear(hidden, 3)

    def forward(self, x):
        """Map a (batch, seq, 5) tensor to (batch, 3) predictions."""
        seq_out, _ = self.lstm(x)
        # Only the final time step feeds the output head.
        return self.linear(seq_out[:, -1, :])


# ===========================================================================
# 版本2: 增加了复杂度的LSTM模型，单层全连接
# ===========================================================================
class LSTM_v2(nn.Module):
    """Version 2: more capacity, single hidden MLP layer.

    Two stacked LSTM layers (64 hidden units, inter-layer dropout 0.2)
    followed by a two-layer fully connected head with ReLU and dropout.
    Inputs are [x, y, theta, lstim, rstim]; outputs are [x, y, theta].
    """

    def __init__(self):
        super().__init__()
        hidden = 64
        self.lstm = nn.LSTM(
            input_size=5,
            hidden_size=hidden,
            num_layers=2,
            batch_first=True,
            dropout=0.2,  # applies between the two LSTM layers
        )
        self.fc1 = nn.Linear(hidden, hidden // 2)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.1)
        self.fc2 = nn.Linear(hidden // 2, 3)

    def forward(self, x):
        """Map a (batch, seq, 5) tensor to (batch, 3) predictions."""
        seq_out, _ = self.lstm(x)
        h = seq_out[:, -1, :]
        h = self.dropout(self.relu(self.fc1(h)))
        return self.fc2(h)


# ===========================================================================
# 版本3: 使用层归一化的LSTM模型，解决批量归一化问题
# ===========================================================================
class LSTM_v3(nn.Module):
    """Version 3: bidirectional front-end plus LayerNorm MLP head.

    A single bidirectional LSTM extracts base features, a 3-layer stacked
    LSTM (128 hidden units) refines them, and a 3-layer fully connected
    head with LayerNorm (chosen over BatchNorm so small/variable batch
    sizes work) produces [x, y, theta] from [x, y, theta, lstim, rstim].
    """

    def __init__(self, dropout=0.2):
        super().__init__()
        hidden = 128
        # NOTE: dropout on a 1-layer LSTM has no effect (PyTorch warns);
        # kept as-is to match the original checkpoint-compatible config.
        self.bilstm = nn.LSTM(
            input_size=5,
            hidden_size=hidden // 2,
            num_layers=1,
            batch_first=True,
            dropout=dropout,
            bidirectional=True,
        )
        self.lstm = nn.LSTM(
            input_size=hidden,
            hidden_size=hidden,
            num_layers=3,
            batch_first=True,
            dropout=dropout,  # between stacked layers
        )
        # Fully connected head: two LayerNorm+ReLU+Dropout stages, then output.
        self.fc1 = nn.Linear(hidden, hidden)
        self.ln1 = nn.LayerNorm(hidden)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)

        self.fc2 = nn.Linear(hidden, hidden // 2)
        self.ln2 = nn.LayerNorm(hidden // 2)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)

        self.fc3 = nn.Linear(hidden // 2, 3)

    def forward(self, x):
        """Map a (batch, seq, 5) tensor to (batch, 3) predictions."""
        # Bidirectional pass for base features, then the deep stack.
        base, _ = self.bilstm(x)
        seq_out, _ = self.lstm(base)
        h = seq_out[:, -1, :]
        # Two normalized MLP stages before the output projection.
        h = self.dropout(self.relu(self.ln1(self.fc1(h))))
        h = self.dropout2(self.relu2(self.ln2(self.fc2(h))))
        return self.fc3(h)


# ===========================================================================
# 版本4: 大幅增加模型复杂度，添加注意力机制和跳跃连接
# ===========================================================================
class LSTM_v4(nn.Module):
    """Version 4: attention and skip connections at larger scale.

    Pipeline: per-timestep feature extractor -> 2-layer bidirectional
    LSTM -> 4-layer stacked LSTM (256 hidden units) -> additive attention
    over time combined with the last time step -> deep MLP with a skip
    connection -> output head. Inputs [x, y, theta, lstim, rstim],
    outputs [x, y, theta].
    """

    def __init__(self, dropout=0.2):
        super().__init__()
        hidden = 256
        half = hidden // 2

        self.dropout_rate = dropout

        # Applied independently to every time step before the RNNs.
        self.feature_extractor = nn.Sequential(
            nn.Linear(5, half),
            nn.LayerNorm(half),
            nn.ReLU(),
            nn.Dropout(dropout),
        )

        # Bidirectional front-end; outputs concatenate to `hidden` features.
        self.bilstm = nn.LSTM(
            input_size=half,
            hidden_size=half,
            num_layers=2,
            batch_first=True,
            dropout=dropout,
            bidirectional=True,
        )

        self.lstm = nn.LSTM(
            input_size=hidden,
            hidden_size=hidden,
            num_layers=4,
            batch_first=True,
            dropout=dropout,
        )

        # Additive attention producing per-timestep weights (softmax over time).
        self.attention = nn.Sequential(
            nn.Linear(hidden, hidden),
            nn.Tanh(),
            nn.Linear(hidden, 1),
            nn.Softmax(dim=1),
        )

        # Deep MLP over [attended ; last-step] (hence the 2x input width).
        self.fc_network = nn.Sequential(
            nn.Linear(hidden * 2, hidden),
            nn.LayerNorm(hidden),
            nn.ReLU(),
            nn.Dropout(dropout),

            nn.Linear(hidden, hidden),
            nn.LayerNorm(hidden),
            nn.ReLU(),
            nn.Dropout(dropout),

            nn.Linear(hidden, half),
            nn.LayerNorm(half),
            nn.ReLU(),
            nn.Dropout(dropout),
        )

        # Projects the last LSTM step for the residual/skip path.
        self.skip_connection = nn.Linear(hidden, half)

        self.output_layer = nn.Sequential(
            nn.Linear(hidden, half),
            nn.LayerNorm(half),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(half, 3),
        )

    def forward(self, x):
        """Map a (batch, seq, 5) tensor to (batch, 3) predictions."""
        b, t, f = x.size()

        # Per-timestep feature extraction via a flatten/unflatten round trip.
        feats = self.feature_extractor(x.view(b * t, f)).view(b, t, -1)

        bi_out, _ = self.bilstm(feats)
        seq_out, _ = self.lstm(bi_out)

        # Attention-weighted summary over time plus the final time step.
        weights = self.attention(seq_out)
        attended = (weights * seq_out).sum(dim=1)
        last = seq_out[:, -1, :]

        fused = self.fc_network(torch.cat([attended, last], dim=1))

        # Residual add of the skip path, then both halves feed the head.
        skip = self.skip_connection(last)
        merged = torch.cat([fused + skip, skip], dim=1)
        return self.output_layer(merged)


# ===========================================================================
# 版本5: 当前最新版本 - 更大的隐藏层尺寸 (512)
# ===========================================================================
class LSTM_v5(nn.Module):
    """Version 5: the v4 architecture scaled to 512 hidden units.

    Identical topology to LSTM_v4 — per-timestep feature extractor,
    2-layer bidirectional LSTM, 4-layer stacked LSTM, additive attention
    combined with the last time step, deep MLP with a skip connection —
    only the hidden size grows. Inputs [x, y, theta, lstim, rstim],
    outputs [x, y, theta].
    """

    def __init__(self, dropout=0.2):
        super().__init__()
        hidden = 512
        half = hidden // 2

        self.dropout_rate = dropout

        # Applied independently to every time step before the RNNs.
        self.feature_extractor = nn.Sequential(
            nn.Linear(5, half),
            nn.LayerNorm(half),
            nn.ReLU(),
            nn.Dropout(dropout),
        )

        # Bidirectional front-end; outputs concatenate to `hidden` features.
        self.bilstm = nn.LSTM(
            input_size=half,
            hidden_size=half,
            num_layers=2,
            batch_first=True,
            dropout=dropout,
            bidirectional=True,
        )

        self.lstm = nn.LSTM(
            input_size=hidden,
            hidden_size=hidden,
            num_layers=4,
            batch_first=True,
            dropout=dropout,
        )

        # Additive attention producing per-timestep weights (softmax over time).
        self.attention = nn.Sequential(
            nn.Linear(hidden, hidden),
            nn.Tanh(),
            nn.Linear(hidden, 1),
            nn.Softmax(dim=1),
        )

        # Deep MLP over [attended ; last-step] (hence the 2x input width).
        self.fc_network = nn.Sequential(
            nn.Linear(hidden * 2, hidden),
            nn.LayerNorm(hidden),
            nn.ReLU(),
            nn.Dropout(dropout),

            nn.Linear(hidden, hidden),
            nn.LayerNorm(hidden),
            nn.ReLU(),
            nn.Dropout(dropout),

            nn.Linear(hidden, half),
            nn.LayerNorm(half),
            nn.ReLU(),
            nn.Dropout(dropout),
        )

        # Projects the last LSTM step for the residual/skip path.
        self.skip_connection = nn.Linear(hidden, half)

        self.output_layer = nn.Sequential(
            nn.Linear(hidden, half),
            nn.LayerNorm(half),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(half, 3),
        )

    def forward(self, x):
        """Map a (batch, seq, 5) tensor to (batch, 3) predictions."""
        b, t, f = x.size()

        # Per-timestep feature extraction via a flatten/unflatten round trip.
        feats = self.feature_extractor(x.view(b * t, f)).view(b, t, -1)

        bi_out, _ = self.bilstm(feats)
        seq_out, _ = self.lstm(bi_out)

        # Attention-weighted summary over time plus the final time step.
        weights = self.attention(seq_out)
        attended = (weights * seq_out).sum(dim=1)
        last = seq_out[:, -1, :]

        fused = self.fc_network(torch.cat([attended, last], dim=1))

        # Residual add of the skip path, then both halves feed the head.
        skip = self.skip_connection(last)
        merged = torch.cat([fused + skip, skip], dim=1)
        return self.output_layer(merged)

class LSTM_v6(nn.Module):
    """Version 6: the v4/v5 architecture scaled further.

    Same topology as LSTM_v5 but with 768 hidden units and a 5-layer
    stacked LSTM: per-timestep feature extractor, 2-layer bidirectional
    LSTM, stacked LSTM, additive attention combined with the last time
    step, deep MLP with a skip connection, and an output head.
    Inputs [x, y, theta, lstim, rstim], outputs [x, y, theta].
    """

    def __init__(self, dropout=0.2):
        super().__init__()
        hidden = 768
        half = hidden // 2

        self.dropout_rate = dropout

        # Applied independently to every time step before the RNNs.
        self.feature_extractor = nn.Sequential(
            nn.Linear(5, half),
            nn.LayerNorm(half),
            nn.ReLU(),
            nn.Dropout(dropout),
        )

        # Bidirectional front-end; outputs concatenate to `hidden` features.
        self.bilstm = nn.LSTM(
            input_size=half,
            hidden_size=half,
            num_layers=2,
            batch_first=True,
            dropout=dropout,
            bidirectional=True,
        )

        self.lstm = nn.LSTM(
            input_size=hidden,
            hidden_size=hidden,
            num_layers=5,
            batch_first=True,
            dropout=dropout,
        )

        # Additive attention producing per-timestep weights (softmax over time).
        self.attention = nn.Sequential(
            nn.Linear(hidden, hidden),
            nn.Tanh(),
            nn.Linear(hidden, 1),
            nn.Softmax(dim=1),
        )

        # Deep MLP over [attended ; last-step] (hence the 2x input width).
        self.fc_network = nn.Sequential(
            nn.Linear(hidden * 2, hidden),
            nn.LayerNorm(hidden),
            nn.ReLU(),
            nn.Dropout(dropout),

            nn.Linear(hidden, hidden),
            nn.LayerNorm(hidden),
            nn.ReLU(),
            nn.Dropout(dropout),

            nn.Linear(hidden, half),
            nn.LayerNorm(half),
            nn.ReLU(),
            nn.Dropout(dropout),
        )

        # Projects the last LSTM step for the residual/skip path.
        self.skip_connection = nn.Linear(hidden, half)

        self.output_layer = nn.Sequential(
            nn.Linear(hidden, half),
            nn.LayerNorm(half),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(half, 3),
        )

    def forward(self, x):
        """Map a (batch, seq, 5) tensor to (batch, 3) predictions."""
        b, t, f = x.size()

        # Per-timestep feature extraction via a flatten/unflatten round trip.
        feats = self.feature_extractor(x.view(b * t, f)).view(b, t, -1)

        bi_out, _ = self.bilstm(feats)
        seq_out, _ = self.lstm(bi_out)

        # Attention-weighted summary over time plus the final time step.
        weights = self.attention(seq_out)
        attended = (weights * seq_out).sum(dim=1)
        last = seq_out[:, -1, :]

        fused = self.fc_network(torch.cat([attended, last], dim=1))

        # Residual add of the skip path, then both halves feed the head.
        skip = self.skip_connection(last)
        merged = torch.cat([fused + skip, skip], dim=1)
        return self.output_layer(merged)

# 模型演化总结
'''
模型演化历程:

1. 版本1 (LSTM_v1):
   - 最简单的LSTM架构
   - 单层LSTM (32个隐藏单元)
   - 单层全连接输出
   - 参数量: 约6K
   
2. 版本2 (LSTM_v2):
   - 增加了LSTM层数 (2层)
   - 增大隐藏层 (64个隐藏单元)
   - 增加了2层全连接层和激活函数
   - 添加了dropout (0.1)
   - 参数量: 约50K
   
3. 版本3 (LSTM_v3):
   - 添加了双向LSTM层
   - 更大的隐藏层 (128个隐藏单元)
   - 更多LSTM层 (3层)
   - 3层全连接网络
   - 使用LayerNorm代替BatchNorm
   - 更高的dropout (0.2)
   - 参数量: 约400K
   
4. 版本4 (LSTM_v4):
   - 添加初始特征提取层
   - 非常大的隐藏层 (256个隐藏单元)
   - 双向LSTM层数增加 (2层)
   - LSTM总层数增加 (4层)
   - 添加注意力机制
   - 更复杂的全连接网络架构
   - 使用跳跃连接和残差连接
   - 参数量: 约2.5M
   
5. 版本5 (LSTM_v5):
   - 在版本4基础上扩大规模
   - 极大的隐藏层 (512个隐藏单元)
   - 其他架构与版本4相同
   - 参数量: 约8M

6. 版本6 (LSTM_v6):
   - 当前最新版本, 在版本5基础上继续扩大规模
   - 更大的隐藏层 (768个隐藏单元)
   - 堆叠LSTM层数增加 (5层)
   - 其他架构与版本5相同 (注意力机制、跳跃连接、dropout 0.2)
   - 参数量: 约30M


随着模型版本的迭代，参数量显著增加，模型的表达能力大幅提升，应该能有效解决欠拟合问题。
'''

'''
| 版本 | 架构特点 | 隐藏单元 | LSTM层数 | 全连接层 | 特殊机制 | Dropout | 参数量 |
|------|----------|-----------|-----------|-----------|-----------|----------|---------|
| v1   | 最简单LSTM | 32 | 1层 | 1层 | 无 | 无 | ~6K |
| v2   | 多层LSTM | 64 | 2层 | 2层 | 激活函数 | 0.1 | ~50K |
| v3   | 双向LSTM | 128 | 3层 | 3层 | LayerNorm | 0.2 | ~400K |
| v4   | 复杂架构 | 256 | 4层(双向) | 多层 | 注意力机制,跳跃连接 | 0.2 | ~2.5M |
| v5   | 扩大规模 | 512 | 4层(双向) | 多层 | 同v4 | 0.2 | ~8M |
| v6   | 继续扩大规模 | 768 | 5层(双向) | 多层 | 同v4 | 0.2 | ~30M |
'''
