#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
与训练好的模型兼容的排列5序列LSTM模型
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

class CompatiblePLWSequenceLSTM(nn.Module):
    """Sequence LSTM for the "Pai Lie 5" lottery, structurally compatible
    with a previously trained checkpoint (input feature size fixed at 10).

    Architecture: a stacked LSTM followed by 5 independent linear heads,
    one per digit position, each producing logits over the digits 0-9.
    """

    def __init__(self, input_dim=10, hidden_dim=128, num_layers=3, dropout=0.3):
        """
        Args:
            input_dim: feature size of each timestep (10 to match the
                trained checkpoint).
            hidden_dim: LSTM hidden state size.
            num_layers: number of stacked LSTM layers.
            dropout: dropout probability, used both between LSTM layers
                and before the output heads.
        """
        super().__init__()

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers

        # Inter-layer LSTM dropout is only meaningful with >1 layer;
        # PyTorch emits a warning otherwise, so force it to 0 in that case.
        self.lstm = nn.LSTM(
            input_size=input_dim,
            hidden_size=hidden_dim,
            num_layers=num_layers,
            batch_first=True,
            dropout=dropout if num_layers > 1 else 0,
        )

        # One classification head per digit position (5 positions, 10 classes each).
        self.position_heads = nn.ModuleList(
            nn.Linear(hidden_dim, 10) for _ in range(5)
        )

        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Forward pass.

        Args:
            x: float tensor of shape [batch_size, seq_len, input_dim].

        Returns:
            Logits tensor of shape [batch_size, 5, 10].
        """
        lstm_out, _ = self.lstm(x)  # [batch_size, seq_len, hidden_dim]

        # Predict from the final timestep's hidden state only.
        last_output = self.dropout(lstm_out[:, -1, :])  # [batch_size, hidden_dim]

        # One logit vector per position, stacked to [batch_size, 5, 10].
        return torch.stack(
            [head(last_output) for head in self.position_heads], dim=1
        )

    def predict(self, x):
        """Predict a digit for each of the 5 positions.

        Runs in eval mode under ``no_grad`` and then restores whatever
        training/eval mode the module was in before the call (the original
        implementation left the module permanently in eval mode, which
        silently disabled dropout for any subsequent training).

        Args:
            x: float tensor of shape [batch_size, seq_len, input_dim].

        Returns:
            Tuple ``(predictions, probabilities)``:
                predictions: long tensor [batch_size, 5] of argmax digits.
                probabilities: float tensor [batch_size, 5, 10] of softmax
                    probabilities over the 10 digits per position.
        """
        was_training = self.training
        self.eval()
        try:
            with torch.no_grad():
                logits = self.forward(x)
                probabilities = F.softmax(logits, dim=-1)
                predictions = torch.argmax(probabilities, dim=-1)
        finally:
            # Restore the caller's mode even if the forward pass raised.
            self.train(was_training)
        return predictions, probabilities


def load_compatible_plw_sequence_model(model_path, device=None):
    """Load a trained checkpoint into a compatible PLW sequence LSTM.

    Accepts checkpoints saved either as a raw state_dict or as a dict
    wrapping it under the ``'model_state_dict'`` key. The returned model
    is moved to *device* (auto-selected when ``None``) and set to eval mode.

    Args:
        model_path: path to the saved checkpoint file.
        device: target ``torch.device``; defaults to CUDA when available.

    Returns:
        The loaded model in eval mode on the chosen device.
    """
    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Architecture must mirror the one used at training time (input_dim=10).
    net = CompatiblePLWSequenceLSTM(
        input_dim=10, hidden_dim=128, num_layers=3, dropout=0.3
    )

    # NOTE(review): torch.load unpickles arbitrary objects unless
    # weights_only=True — only load checkpoints from trusted sources.
    checkpoint = torch.load(model_path, map_location=device)
    state_dict = (
        checkpoint['model_state_dict']
        if 'model_state_dict' in checkpoint
        else checkpoint
    )
    net.load_state_dict(state_dict)

    net.to(device)
    net.eval()
    return net


if __name__ == "__main__":
    # Smoke test: construct the model and push one dummy batch through it.
    print("[INFO] 测试兼容版排列5序列LSTM模型")

    model = CompatiblePLWSequenceLSTM()
    print("[INFO] 模型创建成功")

    param_count = sum(p.numel() for p in model.parameters())
    print(f" 模型参数数量: {param_count}")

    # Dummy input: random digits 0-9, cast to float for the LSTM.
    batch_size = 2
    seq_len = 10
    input_dim = 10
    test_input = torch.randint(0, 10, (batch_size, seq_len, input_dim)).float()
    print(f" 测试输入形状: {test_input.shape}")

    with torch.no_grad():
        output = model(test_input)
        print(f" 模型输出形状: {output.shape}")