import torch
import torch.nn as nn
from qtorch.model import LSTMModel  # 假设已有LSTM基础模块

class TransformerBlock(nn.Module):
    """Thin wrapper around a 2-layer stacked ``nn.TransformerEncoder``.

    Expects and returns sequence-first tensors, i.e. shape
    ``(seq_len, batch_size, d_model)`` (the default layout of
    ``nn.TransformerEncoderLayer`` with ``batch_first=False``).
    """

    def __init__(self, d_model=64, nhead=4):
        super().__init__()
        base_layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=256,
        )
        # Keep both attributes registered (encoder_layer first) so the
        # state_dict layout matches older checkpoints of this module.
        self.encoder_layer = base_layer
        self.transformer_encoder = nn.TransformerEncoder(base_layer, num_layers=2)

    def forward(self, x):
        """Encode ``x`` of shape (seq_len, batch_size, d_model); same shape out."""
        encoded = self.transformer_encoder(x)
        return encoded

class HybridSignalModel(nn.Module):
    """Two-branch (LSTM + Transformer) trading-signal classifier.

    Summarizes the sequence with an LSTM branch and a Transformer branch,
    fuses [lstm, transformer, |lstm - transformer|], and maps the fused
    vector to buy/hold/sell probabilities.

    Args:
        input_size: number of raw features per time step.
        hidden_size: LSTM hidden size and Transformer ``d_model``.
    """

    def __init__(self, input_size=10, hidden_size=64):
        super().__init__()
        self.lstm = LSTMModel(input_size, hidden_size)
        self.transformer = TransformerBlock(d_model=hidden_size)
        # BUG FIX: the Transformer encoder is built with d_model=hidden_size,
        # but the original fed it raw input_size-wide features, crashing
        # whenever input_size != hidden_size (true for the defaults 10 vs 64).
        # Project features up when needed; Identity keeps the old behavior
        # (and checkpoint compatibility in spirit) when dims already match.
        if input_size != hidden_size:
            self.input_proj = nn.Linear(input_size, hidden_size)
        else:
            self.input_proj = nn.Identity()

        self.time_fusion = nn.Sequential(
            nn.Linear(hidden_size * 3, 128),
            nn.ReLU(),
            nn.Dropout(0.2),
        )

        # NOTE(review): Softmax here means the head emits probabilities, not
        # logits — do not pair with nn.CrossEntropyLoss (which applies its
        # own log-softmax); use NLLLoss on log-probs or drop the Softmax.
        self.output_head = nn.Sequential(
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, 3),  # buy / hold / sell probabilities
            nn.Softmax(dim=-1),
        )

    def forward(self, x):
        """Classify a batch of sequences.

        Args:
            x: tensor of shape (batch, seq, features).

        Returns:
            Tensor of shape (batch, 3) with class probabilities.

        Raises:
            ValueError: if ``x`` is not 3-D or has an empty sequence axis.
        """
        # Explicit raise instead of assert: asserts vanish under `python -O`.
        if x.ndim != 3:
            raise ValueError(f"输入维度错误，期望(batch, seq, features)，实际得到{x.shape}")
        if x.size(1) == 0:
            raise ValueError("序列长度必须大于0")

        # Single time step: duplicate it so both branches see a sequence.
        if x.size(1) == 1:
            x = x.repeat(1, 2, 1)

        # LSTM branch — assumes LSTMModel returns (batch, hidden_size);
        # TODO confirm against qtorch.model.LSTMModel.
        lstm_out = self.lstm(x)

        # Transformer branch: project features to d_model, then go
        # sequence-first as the encoder expects.
        transformer_input = self.input_proj(x).permute(1, 0, 2)  # (seq, batch, hidden)
        trans_out = self.transformer(transformer_input)
        trans_out = trans_out.mean(dim=0)  # pool over time -> (batch, hidden)

        # Multi-scale fusion: both summaries plus their absolute difference.
        combined = torch.cat(
            [lstm_out, trans_out, torch.abs(lstm_out - trans_out)],
            dim=-1,
        )

        fused = self.time_fusion(combined)
        return self.output_head(fused)