#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
简化版的排列5序列LSTM模型
与训练好的模型结构兼容
"""

import torch
import torch.nn as nn
import numpy as np

class SimplePLWSequenceLSTM(nn.Module):
    """Simplified Permutation-5 (PL5) sequence LSTM model, compatible with
    the trained checkpoint's parameter structure.

    A stacked LSTM encodes a sequence of 5-digit draws; the final hidden
    state feeds five independent heads, one per digit position, each
    producing logits over the digits 0-9.
    """

    def __init__(self, input_dim=5, hidden_dim=128, num_layers=2, dropout=0.3):
        """
        Args:
            input_dim: features per time step (the 5 digits of a draw).
            hidden_dim: LSTM hidden size.
            num_layers: number of stacked LSTM layers.
            dropout: dropout probability for the inter-layer LSTM dropout
                and the prediction heads.
        """
        super().__init__()

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.dropout = dropout

        # Simplified LSTM encoder. PyTorch only applies inter-layer
        # dropout when num_layers > 1 (it warns otherwise), hence the guard.
        self.lstm = nn.LSTM(
            input_size=input_dim,
            hidden_size=hidden_dim,
            num_layers=num_layers,
            batch_first=True,
            dropout=dropout if num_layers > 1 else 0
        )

        # Position-specific prediction heads - each position predicted
        # independently from the shared sequence representation.
        self.position_predictors = nn.ModuleList([
            nn.Sequential(
                nn.Linear(hidden_dim, hidden_dim),
                nn.ReLU(),
                nn.Dropout(dropout),
                nn.Linear(hidden_dim, 64),
                nn.ReLU(),
                nn.Dropout(dropout),
                nn.Linear(64, 10)  # logits over the 10 digits 0-9
            ) for _ in range(5)  # 5 digit positions
        ])

    def forward(self, x):
        """
        Forward pass.
        Args:
            x: input tensor [batch_size, seq_len, 5] (5 digit features)
        Returns:
            position_logits: per-position logits [batch_size, 5, 10]
        """
        # Encode the sequence; only the last time step's output is used.
        lstm_out, _ = self.lstm(x)  # [batch_size, seq_len, hidden_dim]
        final_output = lstm_out[:, -1, :]  # [batch_size, hidden_dim]

        # Run every position head on the shared representation and stack
        # the results along a new "position" axis.
        position_logits = torch.stack(
            [head(final_output) for head in self.position_predictors],
            dim=1,
        )  # [batch_size, 5, 10]

        return position_logits

    def predict(self, x):
        """Predict the digit for each of the 5 positions.

        NOTE: switches the module to eval mode as a side effect and runs
        without gradient tracking.

        Args:
            x: input tensor [batch_size, seq_len, 5]; moved to the model's
                device if it is not already there.
        Returns:
            (predictions, probabilities): argmax digits [batch_size, 5]
            and softmax probabilities [batch_size, 5, 10].
        """
        self.eval()
        with torch.no_grad():
            # Look up the model's device once instead of twice.
            device = next(self.parameters()).device
            if x.device != device:
                x = x.to(device)

            logits = self.forward(x)
            # Softmax over the digit dimension of each position.
            probabilities = torch.softmax(logits, dim=-1)
            # Pick the most probable digit per position.
            predictions = torch.argmax(probabilities, dim=-1)
            return predictions, probabilities


def load_simple_plw_sequence_model(model_path, device=None,
                                   input_dim=5, hidden_dim=128,
                                   num_layers=2, dropout=0.3):
    """Load a trained simplified Permutation-5 sequence LSTM model.

    Args:
        model_path: path to the checkpoint file. The checkpoint may be a
            plain state dict or a dict holding it under 'model_state_dict'.
        device: target device; defaults to CUDA when available, else CPU.
        input_dim: features per time step; must match training.
        hidden_dim: LSTM hidden size; must match training.
        num_layers: number of stacked LSTM layers; must match training.
        dropout: dropout probability; must match training.
    Returns:
        The model, moved to `device` and set to eval mode.
    """
    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Build an architecture matching the trained checkpoint.
    model = SimplePLWSequenceLSTM(input_dim=input_dim, hidden_dim=hidden_dim,
                                  num_layers=num_layers, dropout=dropout)

    # SECURITY NOTE: torch.load unpickles arbitrary objects - only load
    # checkpoints from trusted sources.
    checkpoint = torch.load(model_path, map_location=device)
    if 'model_state_dict' in checkpoint:
        model.load_state_dict(checkpoint['model_state_dict'])
    else:
        # Checkpoint is a bare state dict.
        model.load_state_dict(checkpoint)

    model.to(device)
    model.eval()

    return model


def _demo():
    """Smoke test: build the model, run a forward pass, and predict."""
    print("[INFO] 测试简化版排列5序列LSTM模型")

    # Build the model with default hyperparameters.
    model = SimplePLWSequenceLSTM()
    print("[INFO] 模型创建成功")
    print(f" 模型参数数量: {sum(p.numel() for p in model.parameters())}")

    # Exercise the forward pass with a small random batch.
    batch_size, seq_len, input_dim = 2, 10, 5
    test_input = torch.randn(batch_size, seq_len, input_dim)

    print(f" 测试输入形状: {test_input.shape}")

    with torch.no_grad():
        output = model(test_input)
        print(f" 模型输出形状: {output.shape}")

        # Exercise the prediction helper as well.
        predictions, probabilities = model.predict(test_input)
        print(f" 预测结果形状: {predictions.shape}")
        print(f" 预测示例: {predictions[0].tolist()}")


if __name__ == "__main__":
    _demo()