import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import GPT2Model, GPT2Config, GPT2Tokenizer
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
from peft import LoraConfig, get_peft_model
import numpy as np
from math import sqrt
import sys
import os
import scipy.io
# We no longer try to import an external StandardNorm;
# the locally defined Normalize class serves as StandardNorm instead.

def get_activation_fn(activation):
    """
    Look up an activation module by (case-insensitive) name.

    Args:
        activation: Name of the activation function, e.g. 'relu', 'gelu',
            'silu'/'swish', 'mish', 'leakyrelu'.

    Returns:
        An instantiated ``nn.Module`` activation.

    Raises:
        ValueError: If the name is not one of the supported activations.
    """
    name = activation.lower()
    registry = {
        'relu': nn.ReLU,
        'gelu': nn.GELU,
        'silu': nn.SiLU,   # SiLU and Swish are the same function
        'swish': nn.SiLU,
        'mish': nn.Mish,   # available since PyTorch 1.9
    }
    if name in registry:
        return registry[name]()
    if name == 'leakyrelu':
        return nn.LeakyReLU(0.1)
    raise ValueError(f"不支持的激活函数: {name}")

class ReplicationPad1d(nn.Module):
    """Right-pad a [B, C, L] tensor by replicating its last time step."""

    def __init__(self, padding) -> None:
        super(ReplicationPad1d, self).__init__()
        # Only padding[-1] (the right-hand amount) is used.
        self.padding = padding

    def forward(self, input):
        pad_width = self.padding[-1]
        # View of the final element, broadcast across the pad width.
        edge = input[:, :, -1:].expand(-1, -1, pad_width)
        return torch.cat((input, edge), dim=-1)

class FlattenHead(nn.Module):
    """Prediction head: flatten the last two dims, project, then dropout."""

    def __init__(self, n_vars, nf, target_window, head_dropout=0):
        super().__init__()
        self.n_vars = n_vars
        self.flatten = nn.Flatten(start_dim=-2)
        self.linear = nn.Linear(nf, target_window)
        self.dropout = nn.Dropout(head_dropout)

    def forward(self, x):
        # [..., d, p] -> [..., d*p] -> [..., target_window]
        return self.dropout(self.linear(self.flatten(x)))

class Normalize(nn.Module):
    """
    Reversible per-sample normalization over the last (length) dimension.

    In 'norm' mode the per-sample mean/std are computed, cached, and used to
    standardize the input; in 'denorm' mode the cached statistics are applied
    in reverse to restore the original scale.
    """

    def __init__(self, num_features: int, eps=1e-5, affine=False):
        """
        Args:
            num_features: Number of features/channels.
            eps: Small constant added for numerical stability.
            affine: Whether to learn per-channel scale and bias parameters.
        """
        super(Normalize, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.affine = affine
        # Statistics cached by the most recent 'norm' call.
        self.mean = None
        self.stdev = None

        if self.affine:
            self.affine_weight = nn.Parameter(torch.ones(1, num_features, 1))
            self.affine_bias = nn.Parameter(torch.zeros(1, num_features, 1))

    def _get_statistics(self, x):
        # x: [B, C, L] — reduce over the length dimension.
        self.mean = x.mean(dim=-1, keepdim=True)
        self.stdev = (x.var(dim=-1, keepdim=True, unbiased=False) + self.eps).sqrt()

    def _normalize(self, x):
        out = (x - self.mean) / self.stdev
        if self.affine:
            out = out * self.affine_weight + self.affine_bias
        return out

    def _denormalize(self, x):
        out = x
        if self.affine:
            out = (out - self.affine_bias) / self.affine_weight
        return out * self.stdev + self.mean

    def forward(self, x, mode: str):
        if mode == 'norm':
            self._get_statistics(x)
            return self._normalize(x)
        if mode == 'denorm':
            return self._denormalize(x)
        raise NotImplementedError

class PatchEmbedding(nn.Module):
    """Split each channel's sequence into overlapping patches and embed them."""

    def __init__(self, d_model, patch_len, stride, dropout):
        super(PatchEmbedding, self).__init__()
        # Patching parameters.
        self.patch_len = patch_len
        self.stride = stride
        # Right-pad by `stride` so the final window is complete.
        self.padding_patch_layer = ReplicationPad1d((0, stride))

        # Project each patch of length `patch_len` onto a d_model vector.
        self.value_embedding = nn.Linear(patch_len, d_model)

        # Residual dropout.
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # x: [B, C, L]; remember the channel count for the caller.
        n_vars = x.shape[1]
        padded = self.padding_patch_layer(x)
        patches = padded.unfold(dimension=-1, size=self.patch_len, step=self.stride)
        # Fold batch and channel dims together: [B*C, num_patches, patch_len].
        b, c, p, plen = patches.shape
        patches = patches.reshape(b * c, p, plen)
        # Input encoding.
        embedded = self.value_embedding(patches)
        return self.dropout(embedded), n_vars


class ReprogrammingLayer(nn.Module):
    """
    Cross-attention "reprogramming" layer: maps feature embeddings into the
    LLM embedding space by attending over (compressed) LLM word embeddings,
    with a residual connection and layer normalization on the output.
    """

    def __init__(self, d_model, n_heads, d_llm, attention_dropout=0.3):  # raised from 0.2 to 0.3
        super(ReprogrammingLayer, self).__init__()
        self.d_model = d_model
        self.n_heads = n_heads
        self.d_llm = d_llm
        self.dropout = nn.Dropout(attention_dropout)
        # Extra feature-level dropout for stronger regularization (raised from 0.15 to 0.2).
        self.feature_dropout = nn.Dropout(0.2)
        
        # Simplified design: define the projection layers directly.
        d_keys = d_model // n_heads
        bottleneck_dim = 32 # bottleneck width for the low-rank K/V/output projections
        self.query_projection = nn.Linear(d_model, d_keys * n_heads)
        
        self.key_projection = nn.Sequential(
            nn.Linear(d_llm, bottleneck_dim),
            nn.GELU(),
            nn.Linear(bottleneck_dim, d_keys * n_heads)
        )
        self.value_projection = nn.Sequential(
            nn.Linear(d_llm, bottleneck_dim),
            nn.GELU(),
            nn.Linear(bottleneck_dim, d_keys * n_heads)
        )
        self.out_projection = nn.Sequential(
            nn.Linear(d_keys * n_heads, bottleneck_dim),
            nn.GELU(),
            nn.Linear(bottleneck_dim, d_llm)
        )
        
        # Layer norms and residual support.
        # NOTE(review): norm2 is defined but never used in forward() — confirm
        # whether it is dead code or intended for a second sub-layer.
        self.norm1 = nn.LayerNorm(d_llm)
        self.norm2 = nn.LayerNorm(d_llm)
        
        # If d_model != d_llm, a projection is needed for the residual path.
        if d_model != d_llm:
            self.input_projection = nn.Sequential(
                nn.Linear(d_model, bottleneck_dim),
                nn.GELU(),
                nn.Linear(bottleneck_dim, d_llm)
            )
        else:
            self.input_projection = nn.Identity()
        
        print(f"ReprogrammingLayer: d_model={d_model}, n_heads={n_heads}, d_llm={d_llm}, attention_dropout={attention_dropout}")
        
    def forward(self, target_embedding, source_embedding, value_embedding):
        """
        Args:
            target_embedding: [B, T, d_model], target sequence embedding (from input features)
            source_embedding: [vocab_size, d_llm], source embedding (from LLM word embeddings)
            value_embedding: [vocab_size, d_llm], value embedding (from LLM word embeddings)
        
        Returns:
            output: [B, T, d_llm], embedding fed into the LLM
        """
        B, L, _ = target_embedding.shape
        S, _ = source_embedding.shape
        H = self.n_heads

        # Project target_embedding to d_llm for the residual connection.
        target_projected = self.input_projection(target_embedding)  # [B, L, d_llm]

        target_embedding = self.query_projection(target_embedding).view(B, L, H, -1)
        source_embedding = self.key_projection(source_embedding).view(S, H, -1)
        value_embedding = self.value_projection(value_embedding).view(S, H, -1)

        # Attention sub-layer, followed by residual add + normalization.
        out = self.reprogramming(target_embedding, source_embedding, value_embedding)
        out = out.reshape(B, L, -1)
        out = self.out_projection(out)
        
        # Feature-level dropout on the projected output.
        out = self.feature_dropout(out)
        
        # Residual connection + layer norm.
        out = self.norm1(target_projected + self.dropout(out))
        
        return out

    def reprogramming(self, target_embedding, source_embedding, value_embedding):
        """Scaled dot-product cross attention; returns [B, L, H, E]."""
        B, L, H, E = target_embedding.shape

        scale = 1. / sqrt(E)

        # scores: [B, H, L, S] — queries over the vocabulary axis.
        scores = torch.einsum("blhe,she->bhls", target_embedding, source_embedding)

        A = self.dropout(torch.softmax(scale * scores, dim=-1))
        reprogramming_embedding = torch.einsum("bhls,she->blhe", A, value_embedding)

        return reprogramming_embedding


class NoResidualReprogrammingLayer(nn.Module):
    """
    Variant of ReprogrammingLayer without the residual connection, kept for
    ablation experiments.
    """
    def __init__(self, d_model, n_heads, d_llm, attention_dropout=0.3):  # raised from 0.2 to 0.3
        super(NoResidualReprogrammingLayer, self).__init__()
        self.d_model = d_model
        self.n_heads = n_heads
        self.d_llm = d_llm
        self.dropout = nn.Dropout(attention_dropout)
        # Extra feature-level dropout for stronger regularization.
        self.feature_dropout = nn.Dropout(0.2)

        # Plain linear projections for Q/K/V and the output.
        d_keys = d_model // n_heads
        self.query_projection = nn.Linear(d_model, d_keys * n_heads)
        self.key_projection = nn.Linear(d_llm, d_keys * n_heads)
        self.value_projection = nn.Linear(d_llm, d_keys * n_heads)
        self.out_projection = nn.Linear(d_keys * n_heads, d_llm)

        # A single layer norm on the output.
        self.norm = nn.LayerNorm(d_llm)

    def forward(self, target_embedding, source_embedding, value_embedding):
        batch, length, _ = target_embedding.shape
        vocab, _ = source_embedding.shape
        heads = self.n_heads

        queries = self.query_projection(target_embedding).view(batch, length, heads, -1)
        keys = self.key_projection(source_embedding).view(vocab, heads, -1)
        values = self.value_projection(value_embedding).view(vocab, heads, -1)

        # Cross attention over the vocabulary; the residual path is
        # intentionally absent in this variant.
        attended = self.reprogramming(queries, keys, values).reshape(batch, length, -1)
        projected = self.out_projection(attended)

        # Feature-level dropout on the projected output.
        projected = self.feature_dropout(projected)

        # Normalization only — no residual add.
        return self.norm(self.dropout(projected))

    def reprogramming(self, target_embedding, source_embedding, value_embedding):
        """Scaled dot-product cross attention; returns [B, L, H, E]."""
        _, _, _, head_dim = target_embedding.shape

        scale = 1. / sqrt(head_dim)

        # scores: [B, H, L, S]
        scores = torch.einsum("blhe,she->bhls", target_embedding, source_embedding)

        weights = self.dropout(torch.softmax(scale * scores, dim=-1))
        return torch.einsum("bhls,she->blhe", weights, value_embedding)


class AttentionLayer(nn.Module):
    """Thin wrapper around PyTorch multi-head attention (batch-first)."""

    def __init__(self, d_model, n_heads, dropout=0.1):
        super(AttentionLayer, self).__init__()
        self.attention = nn.MultiheadAttention(
            embed_dim=d_model,
            num_heads=n_heads,
            dropout=dropout,
            batch_first=True
        )

    def forward(self, queries, keys, values):
        # Discard the attention-weights output; return only the values.
        attended, _unused_weights = self.attention(queries, keys, values)
        return attended


class TwoStageAttentionLayer(nn.Module):
    """
    Simplified two-stage attention layer for multi-cluster channel features,
    with a residual connection.

    A learned "router" parameter of shape [seg_num, factor, d_model] queries
    each segment's feature vectors, compressing every segment down to
    `factor` slots of width d_model.
    """
    def __init__(self, seg_num, factor, d_model, n_heads, dropout=0.1):
        super(TwoStageAttentionLayer, self).__init__()
        self.dim_sender = AttentionLayer(d_model, n_heads, dropout=dropout)
        self.router = nn.Parameter(torch.randn(seg_num, factor, d_model))
        self.dropout = nn.Dropout(dropout)
        
        # Layer normalization over the flattened router output.
        self.norm = nn.LayerNorm(factor * d_model)
        
        # Projection for the residual path (input flattened per segment).
        # NOTE(review): this Linear expects last dim seg_num * d_model, but
        # forward() feeds it a tensor with last dim ts_d * d_model — it only
        # works when ts_d == seg_num; confirm against callers.
        self.residual_projection = nn.Linear(seg_num * d_model, factor * d_model)
        
    def forward(self, x):
        """
        Input shape:  [batch_size, ts_d, seg_num, d_model]
        Output shape: [batch_size, seg_num, factor*d_model]
        """
        batch = x.shape[0]
        
        # Keep a flattened copy of the input for the residual connection.
        x_for_residual = x.permute(0, 2, 1, 3)  # [b, seg_num, ts_d, d_model]
        x_for_residual = x_for_residual.reshape(batch, x_for_residual.shape[1], -1)  # [b, seg_num, ts_d*d_model]
        
        # Rearrange dimensions for attention processing.
        x_dim = x.permute(0, 2, 1, 3)  # [b, seg_num, ts_d, d_model]
        
        # Fold batch and segment dimensions together.
        dim_send = x_dim.reshape(batch * x_dim.shape[1], x_dim.shape[2], x_dim.shape[3])
        
        # Tile the router across the batch.
        batch_router = self.router.unsqueeze(0).repeat(batch, 1, 1, 1)
        batch_router = batch_router.reshape(batch * batch_router.shape[1], batch_router.shape[2], batch_router.shape[3])
        
        # Router queries attend over each segment's feature vectors.
        dim_buffer = self.dim_sender(batch_router, dim_send, dim_send)
        
        # Restore the batch dimension.
        final_out = dim_buffer.reshape(batch, x_dim.shape[1], -1)  # [batch, seg_num, factor*d_model]
        
        # Residual path: project the flattened input to the output width.
        residual = self.residual_projection(x_for_residual)
        
        # Residual add + layer normalization.
        final_out = self.norm(final_out + self.dropout(residual))
        
        return final_out


class TemporalPositionalEncoding(nn.Module):
    """
    时间位置编码层，为序列数据添加位置信息
    """
    def __init__(self, d_model, max_len=5000):
        """
        初始化位置编码层
        
        Args:
            d_model (int): 模型维度
            max_len (int): 最大序列长度
        """
        super(TemporalPositionalEncoding, self).__init__()
        
        # 创建位置编码
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-np.log(10000.0) / d_model))
        
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        
        self.register_buffer('pe', pe.unsqueeze(0))
        
    def forward(self, x):
        """
        添加位置编码
        
        Args:
            x: 输入序列，形状为[B, T, ...]
            
        Returns:
            x_with_pe: 添加了位置编码的序列
        """
        return x + self.pe[:, :x.size(1)]


class TransformerEncoder(nn.Module):
    """
    Stack of standard nn.TransformerEncoderLayer blocks with a final layer
    norm, used as a drop-in alternative to TwoStageAttention.
    """
    def __init__(self, d_model, n_heads, d_ff=None, dropout=0.1, activation="relu", n_layers=2):
        super(TransformerEncoder, self).__init__()
        # Default feed-forward width: 4x the model dimension.
        if not d_ff:
            d_ff = 4 * d_model
        self.encoder_layers = nn.ModuleList([
            nn.TransformerEncoderLayer(
                d_model=d_model,
                nhead=n_heads,
                dim_feedforward=d_ff,
                dropout=dropout,
                activation=activation,
                batch_first=True
            ) for _ in range(n_layers)
        ])

        self.norm = nn.LayerNorm(d_model)

    def forward(self, src):
        """
        Args:
            src: Input sequence [batch_size, seq_len, d_model].
        Returns:
            Encoded sequence [batch_size, seq_len, d_model].
        """
        encoded = src
        for encoder_layer in self.encoder_layers:
            encoded = encoder_layer(encoded)
        return self.norm(encoded)


class ResidualProjectionBlock(nn.Module):
    """
    Projection block with a skip connection, intended to ease gradient flow.

    Two linear sub-layers (with SiLU, dropout and layer norm) transform the
    input; the (possibly projected) input is added back once at the end,
    before a final layer normalization.
    """
    def __init__(self, input_dim, hidden_dim, output_dim, dropout=0.1):
        super(ResidualProjectionBlock, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim

        # Sub-layer 1: linear -> SiLU -> dropout -> norm.
        self.linear1 = nn.Linear(input_dim, hidden_dim)
        self.activation1 = nn.SiLU()
        self.dropout1 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(hidden_dim)

        # Sub-layer 2: linear -> dropout; normalized after the skip is added.
        self.linear2 = nn.Linear(hidden_dim, output_dim)
        self.dropout2 = nn.Dropout(dropout)
        self.norm2 = nn.LayerNorm(output_dim)

        # Skip path: project only when the dimensions differ.
        if input_dim != output_dim:
            self.residual_projection = nn.Linear(input_dim, output_dim)
        else:
            self.residual_projection = nn.Identity()

    def forward(self, x):
        """
        Args:
            x: Input tensor [batch_size, seq_len, input_dim].
        Returns:
            Output tensor [batch_size, seq_len, output_dim].
        """
        # Skip path (projected if input/output widths differ).
        skip = self.residual_projection(x)

        # Main path through both sub-layers.
        hidden = self.norm1(self.dropout1(self.activation1(self.linear1(x))))
        transformed = self.dropout2(self.linear2(hidden))

        # Add the skip, then apply the final normalization.
        return self.norm2(transformed + skip)


class SimplifiedProjectionBlock(nn.Module):
    """
    Lightweight projection block: a single MLP path plus a skip connection,
    with one layer normalization on the sum.
    """
    def __init__(self, input_dim, hidden_dim, output_dim, dropout=0.1):
        super(SimplifiedProjectionBlock, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim

        # One fused projection path: linear -> SiLU -> dropout -> linear.
        self.projection = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.SiLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, output_dim)
        )

        # Single layer norm applied to the summed output.
        self.norm = nn.LayerNorm(output_dim)

        # Skip path: project only when the dimensions differ.
        if input_dim != output_dim:
            self.residual_projection = nn.Linear(input_dim, output_dim)
        else:
            self.residual_projection = nn.Identity()

    def forward(self, x):
        """
        Args:
            x: Input tensor [batch_size, seq_len, input_dim].
        Returns:
            Output tensor [batch_size, seq_len, output_dim].
        """
        # Projection path plus (possibly projected) skip, then normalize.
        return self.norm(self.projection(x) + self.residual_projection(x))


class Model(nn.Module):
    """
    基于大语言模型的信道到探头权重预测模型
    """
    def __init__(self, configs):
        """
        初始化模型
        
        Args:
            configs: 配置参数
        """
        super(Model, self).__init__()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.configs = configs
        
        # 设置数据类型
        if torch.cuda.is_available() and torch.cuda.is_bf16_supported():
            self.dtype = torch.bfloat16
            print("统一使用BFloat16数据类型")
        else:
            self.dtype = torch.float16
            print("CUDA不支持BFloat16，使用Float16数据类型")
        
        # 检测是否使用空间相关性数据
        self.use_spatial_corr = getattr(configs, 'data', "") == "SPATIAL_CORR"
        if self.use_spatial_corr:
            print("检测到使用空间相关性数据，不使用簇相关参数")
            self.spatial_corr_dim = 128  # 64实部 + 64虚部
        else:
            # 模型配置 - 仅在非空间相关性数据时使用
            self.n_clusters = getattr(configs, 'n_clusters', 25)  # 簇数量，如果没有设置则默认为25
            self.feature_dim = 5  # 每个簇的特征维度
            
        # 通用配置
        self.seq_len = configs.seq_len  # 输入序列长度
        self.pred_len = configs.pred_len  # 预测长度
        self.n_probes = configs.n_probes  # 探头数量
        
        # 模型维度
        self.d_model = configs.d_model  # 模型隐藏层维度
        self.d_ff = configs.d_ff  # 前馈网络维度
        
        # Patch参数
        self.patch_len = getattr(configs, 'patch_len', 2)  # 修改为2（原为10）
        self.stride = getattr(configs, 'stride', 1)  # 修改为1（原为5）
        
         # 选择和配置LLM模型
        llm_model = getattr(configs, 'llm_model', 'GPT2-large')  # 默认使用GPT2-large
        print(f"使用LLM模型: {llm_model}")
        
        # 标记是否使用Qwen模型
        self.use_qwen = llm_model in ['Qwen-4B', 'Qwen3-4B', 'Qwen3-8B', 'Qwen-1.7B']
        if self.use_qwen:
            print(f"检测到Qwen模型: {llm_model}, 将使用特定的Qwen处理逻辑")
        
        # 根据模型类型加载不同的预训练模型
        if llm_model == 'GPT2-large':
            # 加载GPT2-large
            gpt2_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'gpt2-large')
            self.gpt2_config = GPT2Config.from_pretrained(gpt2_path)
            self.gpt2 = GPT2Model.from_pretrained(gpt2_path, config=self.gpt2_config)
            self.tokenizer = GPT2Tokenizer.from_pretrained(gpt2_path)
            print("成功加载GPT2-large tokenizer")
        elif llm_model == 'GPT2-medium':
            # 加载GPT2-medium
            gpt2_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'gpt2-medium')
            self.gpt2_config = GPT2Config.from_pretrained(gpt2_path)
            self.gpt2 = GPT2Model.from_pretrained(gpt2_path, config=self.gpt2_config)
            self.tokenizer = GPT2Tokenizer.from_pretrained(gpt2_path)
            print("成功加载GPT2-medium tokenizer")
        elif llm_model == 'GPT2-small':
            # 加载GPT2-small
            gpt2_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'gpt2-small')
            self.gpt2_config = GPT2Config.from_pretrained(gpt2_path)
            self.gpt2 = GPT2Model.from_pretrained(gpt2_path, config=self.gpt2_config)
            self.tokenizer = GPT2Tokenizer.from_pretrained(gpt2_path)
            print("成功加载GPT2-small tokenizer")
        elif llm_model in ['Qwen-4B', 'Qwen3-4B', 'Qwen3-8B']:
            # 设置Qwen模型
            if llm_model == 'Qwen3-8B':
                qwen_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'qwen3-8b')
                print(f"Qwen3-8B模型路径: {qwen_path}")
            else:
                qwen_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'qwen-4b')
                print(f"Qwen-4B模型路径: {qwen_path}")
            
            try:
                # 加载Qwen配置
                self.qwen_config = AutoConfig.from_pretrained(
                    qwen_path,
                    trust_remote_code=True
                )
                
                # 修改Qwen配置，减少计算量
                # self.qwen_config.num_hidden_layers = min(configs.llm_layers, self.qwen_config.num_hidden_layers)
                self.qwen_config.use_cache = False
                self.qwen_config.output_attentions = False
                
                print(f"成功加载Qwen-4B配置，使用{self.qwen_config.num_hidden_layers}层")
                print(f"Qwen-4B隐藏层维度: {self.qwen_config.hidden_size}, 词汇表大小: {self.qwen_config.vocab_size}")
                
                # 设置使用Qwen模型
                self.use_qwen = True
                model_path = qwen_path
            except Exception as e:
                print(f"加载Qwen-4B配置失败: {e}")
                raise RuntimeError(f"无法加载Qwen-4B配置，请确保模型文件存在于正确位置: {qwen_path}")
        elif llm_model == 'Qwen-1.7B':
            qwen_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'qwen-1.7b')
            print(f"Qwen-1.7B模型路径: {qwen_path}")
            
            try:
                # 加载Qwen配置
                self.qwen_config = AutoConfig.from_pretrained(
                    qwen_path,
                    trust_remote_code=True
                )
                
                # 修改Qwen配置，减少计算量
                # self.qwen_config.num_hidden_layers = min(configs.llm_layers, self.qwen_config.num_hidden_layers)
                self.qwen_config.use_cache = False
                self.qwen_config.output_attentions = False
                
                print(f"成功加载Qwen-1.7B配置，使用{self.qwen_config.num_hidden_layers}层")
                print(f"Qwen-1.7B隐藏层维度: {self.qwen_config.hidden_size}, 词汇表大小: {self.qwen_config.vocab_size}")
                
                # 设置使用Qwen模型
                self.use_qwen = True
                model_path = qwen_path
            except Exception as e:
                print(f"加载Qwen-1.7B配置失败: {e}")
                raise RuntimeError(f"无法加载Qwen-1.7B配置，请确保模型文件存在于正确位置: {qwen_path}")
        else:
            print(f"未知的LLM模型类型: {llm_model}，使用默认的GPT2模型")
            gpt2_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'gpt2-large')
            self.gpt2_config = GPT2Config.from_pretrained(gpt2_path)
            self.gpt2 = GPT2Model.from_pretrained(gpt2_path, config=self.gpt2_config)
            self.use_qwen = False
        
        # 根据模型加载不同的预训练模型
        if (llm_model in ['Qwen-4B', 'Qwen3-4B', 'Qwen3-8B', 'Qwen-1.7B']) and getattr(self, 'use_qwen', False):
            # 加载Qwen模型
            try:
                print(f"正在加载{llm_model}模型...")
                
                # 根据设备环境选择数据类型
                if torch.cuda.is_available():
                    model_dtype = torch.bfloat16  # Qwen默认使用bfloat16
                    print(f"使用bfloat16加载{llm_model}模型")
                else:
                    model_dtype = torch.float16
                    print(f"在CPU上使用float16加载{llm_model}模型")
                
                # 加载模型
                self.llm = AutoModelForCausalLM.from_pretrained(
                    model_path,
                    device_map=None,  # 让accelerate处理设备映射
                    torch_dtype=model_dtype,
                    trust_remote_code=True,
                    config=self.qwen_config
                )
                
                # 加载tokenizer
                self.tokenizer = AutoTokenizer.from_pretrained(
                    qwen_path,
                    trust_remote_code=True
                )
                print(f"成功加载{llm_model} tokenizer")
                
                # 设置输出维度
                self.llm_output_dim = self.qwen_config.hidden_size
                
                print(f"成功加载{llm_model}模型，嵌入维度: {self.llm_output_dim}")
            except Exception as e:
                print(f"加载{llm_model}模型失败: {e}")
                raise RuntimeError(f"无法加载{llm_model}模型，请确保模型文件存在于正确位置: {model_path}")
        else:
            print("使用GPT2模型")
            self.llm = self.gpt2
            self.llm_output_dim = self.gpt2_config.n_embd
            # 不使用Qwen
            self.use_qwen = False
        
        # 如果分词器没有pad_token，则设置一个
        if self.tokenizer.pad_token is None:
            if self.tokenizer.eos_token:
                self.tokenizer.pad_token = self.tokenizer.eos_token
            else:
                pad_token = '[PAD]'
                self.tokenizer.add_special_tokens({'pad_token': pad_token})
                self.tokenizer.pad_token = pad_token
        
        # 获取词嵌入并设置映射层
        if self.use_qwen:
            # Qwen模型直接使用其输入嵌入矩阵
            if hasattr(self.llm, 'model') and hasattr(self.llm.model, 'embed_tokens'):
                self.word_embeddings = self.llm.model.embed_tokens.weight
            else:
                # 对于 LoRA 模型或没有直接embed_tokens访问的情况，使用 get_input_embeddings
                self.word_embeddings = self.llm.get_input_embeddings().weight
            # 注意：这里word_embeddings的尺寸是[vocab_size, hidden_size]
            # 对于Qwen可能是[151936, 4096]
        else:
            # GPT2使用get_input_embeddings获取
            self.word_embeddings = self.llm.get_input_embeddings().weight
            
        self.vocab_size = self.word_embeddings.shape[0]
        self.embedding_dim = self.word_embeddings.shape[1]
        print(f"词表大小: {self.vocab_size}, 嵌入维度: {self.embedding_dim}")
        
        # 设定更合理的token数量，用于简化词表映射
        self.num_tokens = 1000
        
        # 对于Qwen模型，我们必须确保映射层的维度正确
        if self.use_qwen:
            # 使用更小的投影维度
            # self.mapping_layer = nn.Linear(self.embedding_dim, self.num_tokens)
            # print(f"创建Qwen词嵌入映射层: {self.embedding_dim} -> {self.num_tokens}")
            self.mapping_layer = nn.Identity()
            print("禁用mapping_layer，使用nn.Identity()")
        else:
            # 为GPT2使用原始映射
            self.mapping_layer = nn.Linear(self.vocab_size, self.num_tokens)
            print(f"创建GPT2词表映射层: {self.vocab_size} -> {self.num_tokens}")
        
        # 添加任务描述文本，用于提示模型
        if hasattr(configs, 'prompt_domain') and configs.prompt_domain:
            self.description = configs.content
        else:
            self.description = '信道预测任务是通过分析信道簇特征预测探头权重的关键技术，可提高无线通信系统性能。'
        
        # 添加通用dropout层
        self.dropout = nn.Dropout(configs.dropout)
        
        # 设置GPT2模型参数为不可训练
        for param in self.llm.parameters():
            param.requires_grad = False
            
        # 如果启用了LoRA，应用LoRA配置
        if getattr(configs, 'islora', False):
            print("使用LoRA训练模式")
            
            # 根据模型类型选择不同的target_modules
            if self.use_qwen:
                # Qwen模型的注意力层名称
                target_modules = ["gate_proj", "up_proj", "down_proj"]
                print("为Qwen模型配置LoRA target_modules: q_proj, k_proj, v_proj, o_proj")
            else:
                # GPT2模型的注意力层名称
                target_modules = ["c_attn", "c_proj"]
                print("为GPT2模型配置LoRA target_modules: c_attn, c_proj")
            
            lora_config = LoraConfig(
                r=getattr(configs, 'lora_r', 8),
                lora_alpha=getattr(configs, 'lora_alpha', 32),
                lora_dropout=getattr(configs, 'lora_dropout', 0.01),
                target_modules=target_modules,
                bias="none",  # 不训练bias
                task_type="FEATURE_EXTRACTION",  # 特征提取任务
            )
            
            # 将LoRA应用到LLM模型
            self.llm = get_peft_model(self.llm, lora_config)
            
            # 打印可训练参数信息
            trainable_params = sum(p.numel() for p in self.llm.parameters() if p.requires_grad)
            all_params = sum(p.numel() for p in self.llm.parameters())
            model_name = "Qwen" if self.use_qwen else "GPT2"
            print(f"{model_name}模型使用LoRA，可训练参数比例: {trainable_params / all_params:.2%}")
            print(f"LoRA配置: r={lora_config.r}, alpha={lora_config.lora_alpha}, dropout={lora_config.lora_dropout}")
            
            # 打印LoRA模块信息
            if hasattr(self.llm, 'print_trainable_parameters'):
                self.llm.print_trainable_parameters()
        
        # 添加简化模式选项
        self.use_minimal_mode = getattr(configs, 'use_minimal_mode', False)
        self.use_positional_encoding = getattr(configs, 'use_positional_encoding', False) # 新增开关

        if self.use_minimal_mode:
            print("🚀 使用极简模式：只保留ReprogrammingLayer!")
            if self.use_positional_encoding:
                print("   ...并启用独立的位置编码。")
        
        # 新增PatchEmbedding模块
        self.use_patch_embedding = getattr(configs, 'use_patch_embedding', False) and not self.use_minimal_mode
        
        # 添加归一化层 - 无论是否使用PatchEmbedding都需要
        # 对于空间相关性数据，使用128作为特征维度(64实部+64虚部)
        if hasattr(self, 'use_spatial_corr') and self.use_spatial_corr:
            standard_norm_features = self.spatial_corr_dim  # 128 for spatial correlation
            print(f"使用空间相关性标准化层，特征维度: {standard_norm_features}")
        else:
            standard_norm_features = self.n_clusters * self.feature_dim
            print(f"使用簇特征标准化层，特征维度: {standard_norm_features}")
        
        self.standard_norm = Normalize(num_features=standard_norm_features, eps=1e-5, affine=False)
        
        if self.use_patch_embedding:
            print(f"使用PatchEmbedding: patch_len={self.patch_len}, stride={self.stride}")
            self.patch_embedding = PatchEmbedding(
                d_model=self.d_model,
                patch_len=self.patch_len,
                stride=self.stride,
                dropout=configs.dropout
            )
            # 计算patch数量
            self.patch_nums = int((self.seq_len - self.patch_len) / self.stride + 2)
            print(f"计算得到的patch数量: {self.patch_nums} (期望与seq_len={self.seq_len}匹配)")
            
            # 计算头部特征数量
            self.head_nf = self.d_ff * self.patch_nums
            
            # 添加输出投影层
            self.output_projection = FlattenHead(
                n_vars=self.n_clusters,
                nf=self.head_nf, 
                target_window=self.n_probes,
                head_dropout=configs.dropout
            )
            
            # 控制是否使用TwoStageAttention或Transformer
            self.use_two_stage_attention = getattr(configs, 'use_two_stage_attention', True)
            self.use_transformer = getattr(configs, 'use_transformer', False)
            
            if self.use_two_stage_attention and not self.use_transformer:
                print("使用TwoStageAttention进行多簇处理")
                # 添加双阶段注意力层，用于处理多簇场景
                self.twostageatt = TwoStageAttentionLayer(
                    seg_num=self.patch_nums,
                    factor=1,
                    d_model=self.d_model,
                    n_heads=configs.n_heads,
                    dropout=configs.dropout
                )
            elif self.use_transformer:
                print("使用Transformer编码器替代TwoStageAttention")
                # 添加Transformer编码器层
                n_transformer_layers = getattr(configs, 'n_transformer_layers', 2)
                self.transformer_encoder = TransformerEncoder(
                    d_model=self.d_model,
                    n_heads=configs.n_heads,
                    d_ff=self.d_ff,
                    dropout=configs.dropout,
                    n_layers=n_transformer_layers
                )
                # 添加位置编码 - 移动到这里，确保任何使用Transformer或独立位置编码的模式都能初始化它
                self.pos_embedding = TemporalPositionalEncoding(d_model=self.d_model)
            else:
                print("不使用TwoStageAttention或Transformer，直接flatten处理多簇数据")
        
        # 添加ReprogrammingLayer
        self.use_reprogramming = getattr(configs, 'use_reprogramming', True)
        # 添加配置选项：是否在ReprogrammingLayer中使用残差连接
        self.use_residual_in_reprogramming = getattr(configs, 'use_residual_in_reprogramming', True)
        # 添加配置选项：是否在大模型输出后添加残差连接
        self.use_residual_after_llm = getattr(configs, 'use_residual_after_llm', False)
        
        if self.use_reprogramming:
            if self.use_residual_in_reprogramming:
                print("使用ReprogrammingLayer与tokenizer做注意力（带残差连接）")
                self.reprogramming_layer = ReprogrammingLayer(
                    d_model=self.d_model,
                    n_heads=configs.n_heads,
                    d_llm=self.llm_output_dim,  # 使用LLM的嵌入维度
                    attention_dropout=configs.dropout
                )
            else:
                print("使用ReprogrammingLayer与tokenizer做注意力（无残差连接）")
                self.reprogramming_layer = NoResidualReprogrammingLayer(
                    d_model=self.d_model,
                    n_heads=configs.n_heads,
                    d_llm=self.llm_output_dim,  # 使用LLM的嵌入维度
                    attention_dropout=configs.dropout
                )
        else:
            print("使用直接线性映射到大模型维度")
            self.linear_projection = nn.Linear(self.d_model, self.llm_output_dim)
        
        # 探头相关配置
        self.total_probes = 481  # 总探头数(修改为481)
        self.n_probes = configs.n_probes  # 需要选择的探头数，默认为16
        
        # 探头选择层 - 使用任务特定的特征维度
        self.task_feature_dim = 512  # 与forward中的target_feature_dim保持一致
        self.probe_selection = nn.Linear(self.task_feature_dim, self.total_probes)
        
        # 添加基于注意力的探头选择机制
        self.use_attention_probe_selection = getattr(configs, 'use_attention_probe_selection', False)
        if self.use_attention_probe_selection:
            print("使用基于注意力的探头选择机制")
            # 探头查询向量，可学习 - 使用任务特定的特征维度
            self.probe_queries = nn.Parameter(torch.randn(self.total_probes, self.task_feature_dim))
            # 多头注意力层 - 使用任务特定的特征维度
            self.probe_attention = nn.MultiheadAttention(
                embed_dim=self.task_feature_dim, 
                num_heads=8, 
                dropout=configs.dropout,
                batch_first=True
            )
        
        # 添加探头字典：每个探头的特征向量
        # 维度为[total_probes, channel_dim]，其中channel_dim是信道特征维度
        if self.use_spatial_corr:
            # 对于空间相关性数据，直接使用spatial_corr_dim
            self.channel_dim = self.spatial_corr_dim  # 128 = 64实部 + 64虚部
        else:
            # 对于普通信道数据，使用簇数 * 每个簇的特征数
            self.channel_dim = self.n_clusters * self.feature_dim
        
        # 从.mat文件加载预定义的探头字典
        try:
            dict_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 
                                   'dataset/dataset/phasecha.mat')
            mat_data = scipy.io.loadmat(dict_path)
            dictionary = mat_data["phasecha"]  # 获取字典矩阵
            
            # 检查字典矩阵大小和形状
            print(f"原始字典矩阵形状: {dictionary.shape}")
            print(f"字典矩阵是否包含复数: {np.iscomplexobj(dictionary)}")
            if np.iscomplexobj(dictionary):
                print(f"复数值范围: 实部[{np.real(dictionary).min():.4f}, {np.real(dictionary).max():.4f}], 虚部[{np.imag(dictionary).min():.4f}, {np.imag(dictionary).max():.4f}]")
                print(f"非零值比例: 实部{np.count_nonzero(np.real(dictionary)) / dictionary.size * 100:.2f}%, 虚部{np.count_nonzero(np.imag(dictionary)) / dictionary.size * 100:.2f}%")
            else:
                print(f"实数值范围: [{dictionary.min():.4f}, {dictionary.max():.4f}]")
                print(f"非零值比例: {np.count_nonzero(dictionary) / dictionary.size * 100:.2f}%")
            
            # 根据字典矩阵的实际维度更新相关参数
            if dictionary.shape[0] == 64:
                # 矩阵形状为 [64, total_probes]
                self.feature_dim_dict = 64  # 特征维度，应该是64
                self.total_probes = dictionary.shape[1]   # 总探头数，应该是481
                print(f"字典矩阵维度: 特征维度={self.feature_dim_dict}, 总探头数={self.total_probes}")
                
                # 分离实部和虚部
                dictionary_real = np.real(dictionary)
                dictionary_imag = np.imag(dictionary)
                
                # 拼接实部和虚部
                dictionary_combined = np.concatenate((dictionary_real, dictionary_imag), axis=0)  # (128, total_probes)
                
                # 更新探头选择投影层以匹配实际探头数量和任务特定特征维度
                self.probe_selection = nn.Linear(self.task_feature_dim, self.total_probes)
                
                # 转换为PyTorch张量
                self.probe_dictionary = nn.Parameter(torch.FloatTensor(dictionary_combined))
                print(f"处理后的探头字典参数形状: {self.probe_dictionary.shape}")
            else:
                raise ValueError(f"字典矩阵形状不正确: {dictionary.shape}")
        except Exception as e:
            print(f"加载探头字典失败: {e}")
            print("使用随机初始化的探头字典")
            # 如果加载失败，使用随机初始化
            self.feature_dim_dict = 64  # 默认特征维度
            self.total_probes = 481     # 默认总探头数
            # 生成复数随机字典并处理
            random_dictionary = np.random.randn(64, self.total_probes) + 1j * np.random.randn(64, self.total_probes)
            dictionary_real = np.real(random_dictionary)
            dictionary_imag = np.imag(random_dictionary)
            dictionary_combined = np.concatenate((dictionary_real, dictionary_imag), axis=0)  # (128, total_probes)
            self.probe_dictionary = nn.Parameter(torch.FloatTensor(dictionary_combined))
            print(f"随机初始化的探头字典形状: {self.probe_dictionary.shape}")
        
        # 最后添加一个SoftMax层确保权重和为1
        self.softmax = nn.Softmax(dim=-1)
        
    def calculate_spatial_correlation_loss(self, channel_reconstructed, original_channel, time_points=None, true_spatial_corr=None):
        """
        Compute the RMSE between reconstructed and original spatial correlation.

        Follows the MATLAB formula sqrt(sum(abs(pred - true)^2, dim=2) / N1),
        where dim=2 is the spatial-feature axis and N1 is the number of complex
        features (64; the tensors carry 128 real values = 64 real + 64 imag).

        Args:
            channel_reconstructed: reconstructed spatial correlation, [B, T, 128]
            original_channel: original spatial correlation, [B, T, 128]
            time_points: unused; kept for interface compatibility
            true_spatial_corr: deprecated; kept for interface compatibility

        Returns:
            dict with key 'rmse_loss' holding a scalar tensor
        """
        # Make inputs contiguous before elementwise math.
        channel_reconstructed = channel_reconstructed.contiguous()
        original_channel = original_channel.contiguous()

        diff = channel_reconstructed - original_channel
        squared_diff = torch.abs(diff) ** 2  # abs then square (== plain square for real inputs)
        sum_squared_diff = torch.sum(squared_diff, dim=2)  # sum over the spatial-feature axis
        N1 = 64  # number of complex features (128 real values encode 64 complex ones)
        mse_per_time = sum_squared_diff / N1  # per-time-step MSE
        rmse_per_time = torch.sqrt(mse_per_time)  # per-time-step RMSE
        rmse_loss = torch.mean(rmse_per_time)  # average over batch and time

        # Cast to the LLM's dtype when the model declares one. The previous
        # version read self.dtype unconditionally and raised AttributeError
        # when the attribute was absent (forward() guards it with hasattr).
        target_dtype = getattr(self, 'dtype', None)
        if target_dtype is not None and rmse_loss.dtype != target_dtype:
            rmse_loss = rmse_loss.to(target_dtype)

        # Return the loss dictionary.
        loss_dict = {
            'rmse_loss': rmse_loss
        }

        return loss_dict
    
    def forward(self, x_enc, x_dec=None, x_mark_enc=None, x_mark_dec=None):
        """
        Forward pass: embed spatial-correlation input, run it through the LLM
        with a textual prompt prefix, select top-k probes per time step, and
        reconstruct the spatial correlation from the probe dictionary.

        Args:
            x_enc: encoder input, one of:
                  1. spatial-correlation tensor of shape [B, T, 128],
                     where 128 = 64 real parts + 64 imaginary parts
                  2. a dict containing keys such as 'data',
                     'spatial_corr_real', 'spatial_corr_imag'
            x_dec: decoder input, shape [B, T', N]; unused here
            x_mark_enc: encoder time marks, shape [B, T, D]
            x_mark_dec: decoder time marks, shape [B, T', D]; unused here

        Returns:
            dict with keys 'probe_indices' [B, seq_len, n_probes],
            'probe_weights' [B, seq_len, n_probes], 'probe_weight_pairs'
            [B, seq_len, n_probes, 2], 'total_probe_scores'
            [B, seq_len, total_probes], 'channel_reconstructed'
            [B, seq_len, 128], and 'spatial_rmse_loss' (scalar tensor).

        NOTE(review): several submodules (minimal_projection,
        input_projection_block, linear_projection_block,
        task_specific_projection, residual_projection_after_llm) are created
        lazily on the first call; an optimizer built before that call will not
        see their parameters — confirm this is intended.
        """
        # Determine the model's working dtype.
        if hasattr(self, 'dtype'):
            model_dtype = self.dtype
        else:
            # Infer the dtype from the LLM's parameters.
            # NOTE(review): model_dtype stays unbound if self.llm has no
            # parameters — confirm that cannot happen here.
            for param in self.llm.parameters():
                model_dtype = param.dtype
                break
        
        # Extract the spatial-correlation data from the input.
        if isinstance(x_enc, dict):
            # Dict input: pull the spatial-correlation components.
            spatial_corr_real = x_enc.get('spatial_corr_real', None)  # [B, T, 64]
            spatial_corr_imag = x_enc.get('spatial_corr_imag', None)  # [B, T, 64]
            
            # If both real and imaginary parts are present, merge them.
            if spatial_corr_real is not None and spatial_corr_imag is not None:
                # Ensure the tensors are contiguous.
                spatial_corr_real = spatial_corr_real.contiguous()
                spatial_corr_imag = spatial_corr_imag.contiguous()
                # Concatenate real and imaginary parts along the feature axis.
                input_data = torch.cat([spatial_corr_real, spatial_corr_imag], dim=-1)  # [B, T, 128]
                # Spatial-correlation data detected; record it once.
                if not hasattr(self, 'use_spatial_corr'):
                    self.use_spatial_corr = True
                    self.spatial_corr_dim = 128
                    # print: spatial-correlation data detected (suppressed: verbose)
            else:
                # Fall back to the raw 'data' field.
                input_data = x_enc.get('data', None)  # [B, T, 128]
                
                if input_data is None:
                    raise ValueError("输入字典缺少必要的空间相关性数据")
        else:
            # Assume the input is already the concatenated spatial-correlation tensor.
            input_data = x_enc  # [B, T, 128]
        
        # Batch size and sequence length.
        batch_size, seq_len = input_data.shape[0], input_data.shape[1]
        
        # The input is kept in its original [B, T, 128] spatial-correlation
        # layout; it is NOT reshaped to [B, T, n_clusters, feature_dim].
        
        # Align the input dtype with the model.
        input_data = input_data.to(model_dtype)
        
        # Standardize the data with StandardNorm.
        # StandardNorm expects [B, C, L], so transpose before/after.
        # Ensure the input tensor is contiguous.
        input_data = input_data.contiguous()
        norm_input = input_data.transpose(1, 2).contiguous()  # [B, 128, T]
        normalized_input = self.standard_norm(norm_input, mode='norm')
        normalized_input = normalized_input.transpose(1, 2).contiguous()  # [B, T, 128]
        
        # Embed the normalized data.
        if self.use_minimal_mode:
            # Minimal mode: simple linear projection (+ ReprogrammingLayer later).
            
            # Optional standalone positional encoding.
            if self.use_positional_encoding and hasattr(self, 'pos_embedding'):
                normalized_input = self.pos_embedding(normalized_input)

            # Lazily create a plain linear projection to d_model.
            if not hasattr(self, 'minimal_projection'):
                self.minimal_projection = nn.Linear(
                    normalized_input.shape[-1], 
                    self.d_model
                ).to(device=normalized_input.device, dtype=normalized_input.dtype)
            
            enc_out = self.minimal_projection(normalized_input)  # [B, T, d_model]
            
        elif self.use_patch_embedding:
            # PatchEmbedding path: treat the 128 spatial features as channels,
            # each with seq_len time points.
            x_patch = normalized_input.permute(0, 2, 1).contiguous()  # [B, 128, T]
            
            # Apply PatchEmbedding.
            enc_out, n_vars = self.patch_embedding(x_patch)  # [(B*128), patch_nums, d_model]
            
            # Optionally mix the multi-channel features.
            if hasattr(self, 'twostageatt') and self.use_two_stage_attention and not self.use_transformer:
                # Reshape for TwoStageAttention; keep tensors contiguous.
                enc_out = enc_out.reshape(batch_size, n_vars, -1, self.d_model).contiguous()  # [B, 128, patch_nums, d_model]
                enc_out = self.twostageatt(enc_out)  # [B, patch_nums, feature_dim]
            elif hasattr(self, 'transformer_encoder') and self.use_transformer:
                # Transformer-encoder path: reshape, average over channels,
                # add positional encoding, then encode.
                enc_out = enc_out.reshape(batch_size, n_vars, -1, self.d_model).contiguous()  # [B, 128, patch_nums, d_model]
                # Average over the n_vars axis -> [B, patch_nums, d_model].
                enc_out = enc_out.mean(dim=1).contiguous()
                # Positional encoding.
                enc_out = self.pos_embedding(enc_out)
                # Transformer encoder.
                enc_out = self.transformer_encoder(enc_out)  # [B, patch_nums, d_model]
            else:
                # No attention/transformer: just average over channels.
                enc_out = enc_out.reshape(batch_size, n_vars, -1, self.d_model).contiguous()  # [B, 128, patch_nums, d_model]
                # Mean over n_vars keeps the shape simple.
                enc_out = enc_out.mean(dim=1).contiguous()  # [B, patch_nums, d_model]
        else:
            # No PatchEmbedding: project the normalized [B, T, 128] data
            # directly to the model dimension.
            
            # Lazily create a projection block with residual connections.
            if not hasattr(self, 'input_projection_block'):
                self.input_projection_block = SimplifiedProjectionBlock(
                    input_dim=normalized_input.shape[-1],
                    hidden_dim=self.d_model * 2,
                    output_dim=self.d_model,
                    dropout=0.1
                ).to(device=normalized_input.device, dtype=normalized_input.dtype)
                # print: created input projection block (suppressed: verbose)
            
            # Apply the residual projection.
            enc_out = self.input_projection_block(normalized_input)
            
            # Optional positional encoding + Transformer encoder.
            if hasattr(self, 'transformer_encoder') and self.use_transformer:
                if hasattr(self, 'pos_embedding'):
                    # Positional encoding applied directly (no extra residual).
                    enc_out = self.pos_embedding(enc_out)
                
                # Transformer encoder (has its own residual connections).
                enc_out = self.transformer_encoder(enc_out)
            
            # Shape is already [B, seq_len, d_model] in both branches below;
            # the split is kept for future data-type-specific handling.
            if hasattr(self, 'use_spatial_corr') and self.use_spatial_corr:
                # Spatial-correlation data: shape already correct.
                pass
            else:
                # Other data types: no extra handling currently needed.
                pass
        
        # Build the mapped word embeddings; keep tensors contiguous.
        # NOTE(review): source_embeddings is computed in both branches but not
        # used below (reprogramming uses self.word_embeddings directly) —
        # confirm whether it is still needed.
        if self.use_qwen:
            # For Qwen, project the embedding vectors rather than the vocab axis.
            embedded_tokens = self.mapping_layer(self.word_embeddings)  # [vocab_size, num_tokens]
            source_embeddings = embedded_tokens.permute(1, 0).contiguous()  # [num_tokens, vocab_size]
        else:
            source_embeddings = self.mapping_layer(self.word_embeddings.permute(1, 0)).permute(1, 0).contiguous()
        
        # Apply the ReprogrammingLayer or a linear mapping.
        reprogramming_output = None  # saved for the post-LLM residual connection
        
        if self.use_reprogramming:
            # Attention directly in the full embedding space.
            gpt2_inputs = self.reprogramming_layer(
                enc_out, 
                self.word_embeddings,  # full word-embedding matrix as keys
                self.word_embeddings
            )
            # Save the reprogramming output for the post-LLM residual.
            if self.use_residual_after_llm:
                reprogramming_output = gpt2_inputs.clone()
        else:
            # Lazily create a residual linear projection to the LLM dimension.
            if not hasattr(self, 'linear_projection_block'):
                self.linear_projection_block = SimplifiedProjectionBlock(
                    input_dim=enc_out.shape[-1],
                    hidden_dim=self.llm_output_dim,
                    output_dim=self.llm_output_dim,
                    dropout=0.1
                ).to(device=enc_out.device, dtype=enc_out.dtype)
            
            gpt2_inputs = self.linear_projection_block(enc_out)
        
        # Prompt mode: prepend a task-description prefix to the embeddings.
        # print: prompt mode enabled (suppressed: verbose)
        
        # LLM dtype (re-read from parameters).
        model_dtype = next(self.llm.parameters()).dtype
        # print: LLM dtype (suppressed: verbose)
        
        # Build one prompt string per batch element.
        batch_size = gpt2_inputs.shape[0]
        prompt = []
        for b in range(batch_size):
            prompt_text = f"<|start_prompt|>{self.description}<|end_prompt|>"
            prompt.append(prompt_text)
        
        # Tokenize the prompts.
        prompt_ids = self.tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=128).input_ids.to(gpt2_inputs.device)
        
        # Look up the prompt token embeddings.
        if self.use_qwen:
            if hasattr(self.llm, 'model') and hasattr(self.llm.model, 'embed_tokens'):
                prompt_embeddings = self.llm.model.embed_tokens(prompt_ids)
            else:
                # LoRA-wrapped models may hide embed_tokens; use the generic accessor.
                prompt_embeddings = self.llm.get_input_embeddings()(prompt_ids)
        else:
            prompt_embeddings = self.llm.get_input_embeddings()(prompt_ids)
        
        # Match the prompt embeddings' dtype to the LLM.
        if prompt_embeddings.dtype != model_dtype:
            prompt_embeddings = prompt_embeddings.to(model_dtype)
        
        # Concatenate prompt embeddings with the input features.
        gpt2_inputs_with_prompt = torch.cat([prompt_embeddings, gpt2_inputs], dim=1)
        
        # Run the LLM.
        if self.use_qwen:
            # Qwen: feed the prompt+data embeddings, request hidden states.
            qwen_outputs = self.llm(inputs_embeds=gpt2_inputs_with_prompt, output_hidden_states=True)
            
            if hasattr(qwen_outputs, 'hidden_states'):
                # NOTE(review): hidden_states[2] is the output of the second
                # transformer block, NOT the last layer as the original
                # comment claimed — confirm which layer is intended.
                gpt2_outputs = qwen_outputs.hidden_states[2]
            elif hasattr(qwen_outputs, 'last_hidden_state'):
                gpt2_outputs = qwen_outputs.last_hidden_state
            else:
                gpt2_outputs = qwen_outputs[0]
            
            # Free the output object and cached GPU memory.
            del qwen_outputs
            torch.cuda.empty_cache()
        else:
            # GPT-2 path.
            gpt2_outputs = self.llm(inputs_embeds=gpt2_inputs_with_prompt).last_hidden_state
        
        # Strip the prompt portion from the LLM output; keep only the
        # positions corresponding to the original data.
        prompt_len = prompt_ids.shape[1]
        data_len = gpt2_inputs.shape[1]  # original data length
        full_sequence_len = gpt2_outputs.shape[1]
        
        # print: sequence/prompt/data lengths (suppressed: verbose)
        # print: extracting data-part features (suppressed: verbose)
        
        # Skip the prompt; use only the data-aligned outputs.
        relevant_outputs = gpt2_outputs[:, prompt_len:, :].contiguous()  # [B, data_len, hidden_size]
        # Dropout on LLM output — discourages memorizing training samples.
        relevant_outputs = self.dropout(relevant_outputs)
        
        # print: data-part feature shape (suppressed: verbose)
        # print: one position per time step, full temporal info kept (suppressed: verbose)
        
        # Optional residual connection after the LLM.
        if self.use_residual_after_llm and reprogramming_output is not None:
            # Match dimensions if needed.
            if relevant_outputs.shape[-1] != reprogramming_output.shape[-1]:
                # Lazily create a projection to the LLM output dimension.
                if not hasattr(self, 'residual_projection_after_llm'):
                    self.residual_projection_after_llm = nn.Linear(
                        reprogramming_output.shape[-1], 
                        relevant_outputs.shape[-1]
                    ).to(device=relevant_outputs.device, dtype=relevant_outputs.dtype)
                    print(f"创建LLM后残差投影层: {reprogramming_output.shape[-1]} -> {relevant_outputs.shape[-1]}")
                
                # Project the reprogramming output to the LLM output dimension.
                reprogramming_projected = self.residual_projection_after_llm(reprogramming_output)
            else:
                reprogramming_projected = reprogramming_output
            
            # Add the residual.
            relevant_outputs = relevant_outputs + reprogramming_projected
            # Dropout after the residual fusion.
            relevant_outputs = self.dropout(relevant_outputs) 
            print(f"✅ 已添加LLM后残差连接: LLM输出 + ReprogrammingLayer输出")
        
        # Task-specific feature head (created lazily on first call).
        if not hasattr(self, 'task_specific_projection'):
            self.task_specific_projection = nn.Sequential(
                nn.Linear(relevant_outputs.shape[-1], 512),
                nn.LayerNorm(512),
                nn.SiLU(),  # SiLU (Swish) activation
                nn.Dropout(0.3),  # task-specific feature dropout
                nn.Linear(512, 512),
                nn.Dropout(0.25)  # final feature dropout
            ).to(device=relevant_outputs.device, dtype=relevant_outputs.dtype)
            # print: created task-specific projection (suppressed: verbose)
        
        # Apply the task-specific projection.
        task_features = self.task_specific_projection(relevant_outputs)
        # Dropout before probe selection.
        task_features = self.dropout(task_features)
        # print: projected feature shape (suppressed: verbose)
        
        # Probe-selection and weight heads.
        # NOTE(review): __init__ already creates self.probe_selection, so this
        # hasattr branch is dead in practice — confirm before relying on it.
        if not hasattr(self, 'probe_selection'):
            self.probe_selection = nn.Linear(512, self.total_probes).to(device=task_features.device, dtype=task_features.dtype)
            # print: created probe-selection layer (suppressed: verbose)
        
        if not hasattr(self, 'weights_projection'):
            self.weights_projection = nn.Linear(512, self.n_probes).to(device=task_features.device, dtype=task_features.dtype)
            # print: created weights-projection layer (suppressed: verbose)
        
        # Compute probe scores and (preliminary) weights.
        probe_scores = self.probe_selection(task_features)  # [B, seq_len, total_probes]
        probe_weights = self.weights_projection(task_features)  # [B, seq_len, n_probes]
        
        # print: output/weight dims (suppressed: verbose)
        
        # Rebuild the heads if the feature dimension does not match.
        if task_features.shape[-1] != self.probe_selection.weight.shape[1]:
            # Recreate both projection layers with the actual feature dim.
            self.probe_selection = nn.Linear(task_features.shape[-1], self.total_probes).to(device=task_features.device, dtype=task_features.dtype)
            self.weights_projection = nn.Linear(task_features.shape[-1], self.n_probes).to(device=task_features.device, dtype=task_features.dtype)
            probe_scores = self.probe_selection(task_features)
            probe_weights = self.weights_projection(task_features)
            # print: projection layers recreated (suppressed: verbose)
        else:
            # Dimensions match; nothing to do.
            pass
        
        # 2. Top-k (n_probes) probe indices and scores.
        top_values, top_indices = torch.topk(probe_scores, k=self.n_probes, dim=-1)  # [B, seq_len, n_probes]
        
        # 3. Normalize the selected scores with softmax so the weights are
        #    positive and sum to 1. NOTE: this overwrites the
        #    weights_projection output computed above.
        probe_weights = self.softmax(top_values).contiguous()  # [B, seq_len, n_probes]
        
        # 4. Reconstruct the channel features from the selected probes
        #    using the batched (optimized) reconstruction.
        channel_reconstructed = self.reconstruct_channel_optimized(top_indices, probe_weights, input_data)
        
        # Probe/weight pair matrix of shape [B, seq_len, n_probes, 2]:
        # [..., 0] holds the probe index, [..., 1] the matching weight.
        probe_weight_pairs = torch.zeros(batch_size, seq_len, self.n_probes, 2, device=top_indices.device)
        probe_weight_pairs[..., 0] = top_indices.float().contiguous()  # probe indices
        probe_weight_pairs[..., 1] = probe_weights.contiguous()         # matching weights
        
        # Spatial-correlation reconstruction loss.
        spatial_corr_loss = self.calculate_spatial_correlation_loss(
            channel_reconstructed,
            input_data
        )
        
        # Result: probe indices, weights, and the reconstructed data.
        result = {
            'probe_indices': top_indices,                  # [B, seq_len, n_probes]
            'probe_weights': probe_weights,                # [B, seq_len, n_probes]
            'probe_weight_pairs': probe_weight_pairs,      # [B, seq_len, n_probes, 2]
            'total_probe_scores': probe_scores,            # [B, seq_len, total_probes], for debugging
            'channel_reconstructed': channel_reconstructed, # [B, seq_len, 128]
            'spatial_rmse_loss': spatial_corr_loss['rmse_loss']
        }
            
        return result
    
    def reconstruct_channel(self, probe_indices, probe_weights, original_data):
        """
        Reconstruct spatial-correlation data from the selected probes and
        weights via full-length weight vectors and a matrix product.

        Reference (per-sample loop) implementation; forward() uses the batched
        reconstruct_channel_optimized instead.

        Args:
            probe_indices: selected probe indices, shape [B, seq_len, n_probes]
            probe_weights: matching probe weights, shape [B, seq_len, n_probes]
            original_data: original spatial correlation, [B, T, 128]; unused,
                kept for interface compatibility

        Returns:
            reconstructed: shape [B, seq_len, feature_dim], where feature_dim
            is probe_dictionary.shape[0] (e.g. 128 = 64 real + 64 imag parts)
        """
        batch_size, seq_len, n_probes = probe_indices.shape
        # Dictionary matrix is [feature_dim, total_probes].
        feature_dim_dict = self.probe_dictionary.shape[0]  # e.g. 64 or 128
        total_probes = self.probe_dictionary.shape[1]      # e.g. 481

        # Match dtypes up front, mirroring reconstruct_channel_optimized.
        # Previously the buffers used the default float32 dtype, which broke
        # the matmul when probe_weights / the dictionary were half precision.
        weights_dtype = probe_weights.dtype
        dictionary_t = self.probe_dictionary.t()
        if dictionary_t.dtype != weights_dtype:
            dictionary_t = dictionary_t.to(weights_dtype)

        # Long indices avoid index_put_ type issues.
        probe_indices = probe_indices.long()

        reconstructed = torch.zeros(
            batch_size, seq_len, feature_dim_dict,
            device=probe_indices.device, dtype=weights_dtype
        )

        for b in range(batch_size):
            for t in range(seq_len):
                # Full-length weight vector: zero everywhere except the
                # selected probes.
                full_weights = torch.zeros(total_probes, device=probe_indices.device, dtype=weights_dtype)
                full_weights.index_put_((probe_indices[b, t],), probe_weights[b, t])

                # Weighted sum of dictionary columns via a single matmul.
                reconstructed[b, t] = torch.matmul(full_weights, dictionary_t)

        return reconstructed
        
    def reconstruct_channel_optimized(self, probe_indices, probe_weights, original_data):
        """
        Reconstruct spatial-correlation data from the selected probes and
        weights — fully vectorized batched implementation.

        Args:
            probe_indices: selected probe indices, shape [B, seq_len, n_probes]
            probe_weights: matching probe weights, shape [B, seq_len, n_probes]
            original_data: original spatial correlation, [B, T, 128]; unused,
                kept for interface compatibility

        Returns:
            reconstructed: shape [B, seq_len, feature_dim_total], where
            feature_dim_total is probe_dictionary.shape[0]
            (e.g. 128 = 64 real + 64 imaginary parts)
        """
        batch_size, seq_len, n_probes = probe_indices.shape
        # Dictionary matrix is [feature_dim_total, total_probes], e.g. [128, 481]
        # where 128 = 64 real + 64 imaginary parts.
        feature_dim_total = self.probe_dictionary.shape[0]
        total_probes = self.probe_dictionary.shape[1]

        # Contiguous dictionary for the matmul below.
        probe_dict = self.probe_dictionary.contiguous()

        # Allocate the dense weight tensor in probe_weights' dtype.
        weights_dtype = probe_weights.dtype
        full_weights = torch.zeros(batch_size, seq_len, total_probes, device=probe_indices.device, dtype=weights_dtype)

        # Long indices avoid scatter_ index-type issues; keep tensors contiguous.
        probe_indices = probe_indices.long().contiguous()
        probe_weights = probe_weights.contiguous()

        # Scatter the selected weights into the dense tensor in a single
        # vectorized call. This replaces the previous Python double loop over
        # (batch, time), which did O(B*T) host-side scatter_ calls with
        # identical results (top-k indices are unique per position).
        full_weights.scatter_(-1, probe_indices, probe_weights)

        # Match the dictionary dtype to the weights before the matmul.
        if probe_dict.dtype != weights_dtype:
            probe_dict = probe_dict.to(dtype=weights_dtype)

        # Batched matmul: [B, seq_len, total_probes] x [total_probes, feature_dim_total]
        # -> [B, seq_len, feature_dim_total]; no intermediate reshape needed.
        reconstructed = torch.matmul(full_weights, probe_dict.t().contiguous())

        return reconstructed.contiguous()