"""
LLM4CP模型定义文件
包含空间重构器和主Transformer模型
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import scipy.io as sio


class SimplePhasechaReconstructor(nn.Module):
    """Reconstruct spatial-correlation features from a probe dictionary.

    Loads a "phasecha" matrix from a .mat file and stacks its real and
    imaginary parts along axis 0 into a real-valued dictionary of shape
    [2 * rows, total_probes]. Given per-step probe indices and weights,
    ``forward`` scatters the (softmax-normalized) weights into a dense
    vector over all probes and multiplies by the dictionary transpose to
    reconstruct one feature vector per time step.
    """

    def __init__(self, phasecha_path, n_probes, total_probes, device):
        """
        Args:
            phasecha_path: path to the .mat file containing key "phasecha".
            n_probes: number of probes selected per time step.
            total_probes: number of columns (probes) in the dictionary.
            device: torch device the dictionary parameter is placed on.
        """
        super().__init__()
        self.n_probes = n_probes
        self.total_probes = total_probes
        self.device = device

        # Trainable dictionary initialized from the phasecha matrix
        # (random fallback if loading fails).
        self.probe_dictionary = self.load_phasecha_matrix(phasecha_path)

    def load_phasecha_matrix(self, phasecha_path):
        """Load the phasecha matrix and wrap it as an ``nn.Parameter``.

        Complex input is split into [real; imag] stacked on axis 0; real
        input is zero-padded so the feature dimension is always doubled.
        On any load error, returns a small random fallback so training can
        still proceed (best-effort, error is printed).
        """
        try:
            mat_data = sio.loadmat(phasecha_path)
            # Expected shape [64, 481] per original author — TODO confirm.
            phasecha = mat_data["phasecha"]

            print(f"✅ 加载phasecha矩阵: {phasecha.shape}")

            if np.iscomplexobj(phasecha):
                # Stack real and imaginary parts -> [2 * rows, cols].
                combined_matrix = np.concatenate(
                    (np.real(phasecha), np.imag(phasecha)), axis=0
                )
            else:
                # Zero-pad so the feature dimension matches the complex case.
                combined_matrix = np.concatenate(
                    (phasecha, np.zeros_like(phasecha)), axis=0
                )

            # torch.from_numpy(...).float() replaces the deprecated
            # torch.FloatTensor(ndarray) constructor; same float32 result.
            matrix = torch.from_numpy(np.ascontiguousarray(combined_matrix)).float()
            return nn.Parameter(matrix.to(self.device))

        except Exception as e:
            # Deliberate best-effort fallback: keep the model usable.
            print(f"❌ 加载phasecha失败: {e}")
            return nn.Parameter(torch.randn(128, self.total_probes).to(self.device) * 0.1)

    def forward(self, probe_indices, probe_weights):
        """Reconstruct features from the selected probes.

        Args:
            probe_indices: [B, T, n_probes] integer indices into the dictionary
                columns (values outside [0, total_probes) are clamped).
            probe_weights: [B, T, n_probes] raw weights; softmax-normalized here.

        Returns:
            [B, T, feature_dim] reconstructed features, where feature_dim is
            the dictionary's row count.
        """
        batch_size, seq_len, _ = probe_indices.shape

        # Clamp so out-of-range indices cannot crash the scatter below.
        probe_indices = torch.clamp(probe_indices.long(), 0, self.total_probes - 1)

        # Normalize the selected-probe weights into a distribution.
        probe_weights_norm = F.softmax(probe_weights, dim=-1)

        feature_dim_total = self.probe_dictionary.shape[0]
        total_probes = self.probe_dictionary.shape[1]

        # Build the dense [B, T, total_probes] weight tensor with a single
        # vectorized scatter along the probe axis — equivalent to the
        # per-(batch, step) loop but without O(B*T) Python overhead.
        full_weights = torch.zeros(
            batch_size, seq_len, total_probes,
            device=probe_indices.device, dtype=probe_weights.dtype,
        )
        full_weights.scatter_(2, probe_indices, probe_weights_norm)

        # Reconstruct: weights [B*T, P] @ dictionary^T [P, D] -> [B*T, D].
        flat_weights = full_weights.reshape(-1, total_probes)
        probe_dict = self.probe_dictionary
        if probe_dict.dtype != flat_weights.dtype:
            probe_dict = probe_dict.to(dtype=flat_weights.dtype)

        reconstructed = torch.matmul(flat_weights, probe_dict.t())
        return reconstructed.reshape(batch_size, seq_len, feature_dim_total)


class SimpleTransformerLLM4CP(nn.Module):
    """Simplified Transformer-based LLM4CP model.

    Encodes a [B, prev_len, enc_in] channel history with a Transformer
    encoder, linearly projects the time axis from prev_len to pred_len,
    then emits three heads: the channel prediction, probe-selection scores
    (top-k indices taken in ``forward``), and normalized probe weights.
    """

    def __init__(self, config):
        """
        Args:
            config: object exposing enc_in, d_model, n_heads, d_ff, n_layers,
                total_probes, n_probes, prev_len, pred_len. Optional:
                ``dropout`` (default 0.1) and ``max_seq_len`` (default 1000),
                matching the previously hard-coded values.
        """
        super().__init__()
        self.config = config

        # Backward-compatible knobs: defaults reproduce the old constants.
        dropout = getattr(config, 'dropout', 0.1)
        max_seq_len = getattr(config, 'max_seq_len', 1000)

        # Input embedding + learned absolute positional embedding.
        self.input_embedding = nn.Linear(config.enc_in, config.d_model)
        self.pos_embedding = nn.Parameter(torch.randn(1, max_seq_len, config.d_model))

        # Transformer encoder stack.
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=config.d_model,
            nhead=config.n_heads,
            dim_feedforward=config.d_ff,
            dropout=dropout,
            batch_first=True,
        )
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=config.n_layers)

        # Channel-prediction head.
        self.channel_head = nn.Sequential(
            nn.Linear(config.d_model, config.d_ff),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(config.d_ff, config.enc_in),
        )

        # Raw scores over the full probe dictionary (top-k in forward()).
        self.probe_selection_head = nn.Sequential(
            nn.Linear(config.d_model, config.d_ff),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(config.d_ff, config.total_probes),
        )

        # Weights for the n_probes selected probes; Softmax makes each
        # step's weights sum to 1.
        self.probe_weight_head = nn.Sequential(
            nn.Linear(config.d_model, config.d_ff // 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(config.d_ff // 2, config.n_probes),
            nn.Softmax(dim=-1),
        )

        # Linear projection along the time axis: prev_len -> pred_len.
        self.time_projection = nn.Linear(config.prev_len, config.pred_len)

    def forward(self, x_enc):
        """
        Args:
            x_enc: [B, prev_len, enc_in] input sequence.

        Returns:
            dict with:
                'channel_prediction': [B, pred_len, enc_in]
                'probe_indices':      [B, pred_len, n_probes] top-k of scores
                'probe_weights':      [B, pred_len, n_probes] (rows sum to 1)
                'probe_scores':       [B, pred_len, total_probes]

        Raises:
            ValueError: if the input is longer than the positional table
                (previously this surfaced as an opaque broadcasting error).
        """
        batch_size, seq_len, _ = x_enc.shape
        if seq_len > self.pos_embedding.shape[1]:
            raise ValueError(
                f"sequence length {seq_len} exceeds positional embedding "
                f"capacity {self.pos_embedding.shape[1]}"
            )

        # Embed and add positional information.
        x = self.input_embedding(x_enc)          # [B, prev_len, d_model]
        x = x + self.pos_embedding[:, :seq_len, :]

        encoded = self.transformer(x)            # [B, prev_len, d_model]

        # Transpose so the Linear acts on the time axis, then restore layout.
        pred_encoded = self.time_projection(encoded.transpose(1, 2)).transpose(1, 2)

        channel_pred = self.channel_head(pred_encoded)          # [B, pred_len, enc_in]
        probe_scores = self.probe_selection_head(pred_encoded)  # [B, pred_len, total_probes]
        probe_indices = torch.topk(probe_scores, self.config.n_probes, dim=-1)[1]
        # NOTE(review): probe_weights come from an independent head and are
        # not ordered to match the top-k indices — confirm this is intended.
        probe_weights = self.probe_weight_head(pred_encoded)    # [B, pred_len, n_probes]

        return {
            'channel_prediction': channel_pred,
            'probe_indices': probe_indices,
            'probe_weights': probe_weights,
            'probe_scores': probe_scores,
        }