import torch
from torch import nn, Tensor
from labml_helpers.module import Module
from labml_nn.transformers.feed_forward import FeedForward
from labml_nn.transformers.mha import MultiHeadAttention
from labml_nn.utils import clone_module_list
import torch.nn.functional as F
from group import Group
from pointNet import Pointnet
from featurePropagation import PointNetFeaturePropagation
from util import sample_and_group
from pointnet_util import farthest_point_sample, index_points, square_distance
from einops import rearrange, repeat
from timm.models.layers import DropPath
# Map from activation name to a zero-argument factory that builds a fresh
# activation module.  Consumers look up an entry and call it with no
# arguments (e.g. `ACTIVATION[name]()` in MLP), so every value must be a
# class or zero-arg callable.  `leaky_relu` was previously stored as a
# pre-built *instance* (`nn.LeakyReLU(0.1)`), which made `act()` invoke the
# module's forward pass with no input and raise a TypeError; it is now
# wrapped in a factory that preserves the 0.1 negative slope.
ACTIVATION = {'gelu': nn.GELU, 'tanh': nn.Tanh, 'sigmoid': nn.Sigmoid, 'relu': nn.ReLU,
              'leaky_relu': lambda: nn.LeakyReLU(0.1),
              'softplus': nn.Softplus, 'ELU': nn.ELU, 'silu': nn.SiLU}






class CrossAttentionModule(torch.nn.Module):
    """Single-head cross attention between two point-wise feature maps.

    `cubesize` supplies the queries and `pts` supplies the keys/values;
    both are expected as [B, N, embed_size] tensors.

    NOTE(review): `num_heads` is stored but never used — the attention is
    effectively single-headed; confirm whether a multi-head split was intended.
    """

    def __init__(self, embed_size, num_heads):
        super(CrossAttentionModule, self).__init__()
        self.embed_size = embed_size
        self.num_heads = num_heads

        # Independent linear projections producing Q, K and V.
        self.query_linear = torch.nn.Linear(embed_size, embed_size)
        self.key_linear = torch.nn.Linear(embed_size, embed_size)
        self.value_linear = torch.nn.Linear(embed_size, embed_size)

        self.attn_dropout = torch.nn.Dropout(0.1)
        self.output_linear = torch.nn.Linear(embed_size, embed_size)

    def forward(self, pts, cubesize):
        """Cross-attend `cubesize` (queries) over `pts` (keys/values).

        Both inputs: [B, N, embed_size].  Returns: [B, N, embed_size].
        """
        q = self.query_linear(cubesize)   # queries come from cubesize
        k = self.key_linear(pts)          # keys come from pts
        v = self.value_linear(pts)        # values come from pts

        # Scaled dot-product attention logits: [B, N, N].
        logits = torch.matmul(q, k.transpose(-2, -1)) / (self.embed_size ** 0.5)

        # Normalise to attention weights, with dropout for regularisation.
        weights = self.attn_dropout(F.softmax(logits, dim=-1))

        # Attention-weighted sum of values, then the output projection.
        attended = torch.matmul(weights, v)  # [B, N, embed_size]
        return self.output_linear(attended)

# class AFNO1D(nn.Module):
#     """
#     hidden_size: channel dimension size
#     num_blocks: how many blocks to use in the block diagonal weight matrices (higher => less complexity but less parameters)
#     sparsity_threshold: lambda for softshrink
#     hard_thresholding_fraction: how many frequencies you want to completely mask out (lower => hard_thresholding_fraction^2 less FLOPs)
#     """
#     def __init__(self, hidden_size, num_blocks=8, sparsity_threshold=0.01, hard_thresholding_fraction=1, hidden_size_factor=1):
#         super().__init__()
#         assert hidden_size % num_blocks == 0, f"hidden_size {hidden_size} should be divisble by num_blocks {num_blocks}"

#         self.hidden_size = hidden_size
#         self.sparsity_threshold = sparsity_threshold
#         self.num_blocks = num_blocks
#         self.block_size = self.hidden_size // self.num_blocks
#         self.hard_thresholding_fraction = hard_thresholding_fraction
#         self.hidden_size_factor = hidden_size_factor
#         self.scale = 0.02

#         self.w1 = nn.Parameter(self.scale * torch.randn(2, self.num_blocks, self.block_size, self.block_size * self.hidden_size_factor))
#         self.b1 = nn.Parameter(self.scale * torch.randn(2, self.num_blocks, self.block_size * self.hidden_size_factor))
#         self.w2 = nn.Parameter(self.scale * torch.randn(2, self.num_blocks, self.block_size * self.hidden_size_factor, self.block_size))
#         self.b2 = nn.Parameter(self.scale * torch.randn(2, self.num_blocks, self.block_size))

#     def forward(self, x):
#         bias = x

#         dtype = x.dtype
#         x = x.float()
#         B, N, C = x.shape

#         x = torch.fft.rfft(x, dim=1, norm="ortho")
#         x = x.reshape(B, N // 2 + 1, self.num_blocks, self.block_size)

#         o1_real = torch.zeros([B, N // 2 + 1, self.num_blocks, self.block_size * self.hidden_size_factor], device=x.device)
#         o1_imag = torch.zeros([B, N // 2 + 1, self.num_blocks, self.block_size * self.hidden_size_factor], device=x.device)
#         o2_real = torch.zeros(x.shape, device=x.device)
#         o2_imag = torch.zeros(x.shape, device=x.device)

#         total_modes = N // 2 + 1
#         kept_modes = int(total_modes * self.hard_thresholding_fraction)

#         o1_real[:, :kept_modes] = F.relu(
#             torch.einsum('...bi,bio->...bo', x[:, :kept_modes].real, self.w1[0]) - \
#             torch.einsum('...bi,bio->...bo', x[:, :kept_modes].imag, self.w1[1]) + \
#             self.b1[0]
#         )

#         o1_imag[:, :kept_modes] = F.relu(
#             torch.einsum('...bi,bio->...bo', x[:, :kept_modes].imag, self.w1[0]) + \
#             torch.einsum('...bi,bio->...bo', x[:, :kept_modes].real, self.w1[1]) + \
#             self.b1[1]
#         )

#         o2_real[:, :kept_modes] = (
#             torch.einsum('...bi,bio->...bo', o1_real[:, :kept_modes], self.w2[0]) - \
#             torch.einsum('...bi,bio->...bo', o1_imag[:, :kept_modes], self.w2[1]) + \
#             self.b2[0]
#         )

#         o2_imag[:, :kept_modes] = (
#             torch.einsum('...bi,bio->...bo', o1_imag[:, :kept_modes], self.w2[0]) + \
#             torch.einsum('...bi,bio->...bo', o1_real[:, :kept_modes], self.w2[1]) + \
#             self.b2[1]
#         )

#         x = torch.stack([o2_real, o2_imag], dim=-1)
#         x = F.softshrink(x, lambd=self.sparsity_threshold)
#         x = torch.view_as_complex(x)
#         x = x.reshape(B, N // 2 + 1, C)
#         x = torch.fft.irfft(x, n=N, dim=1, norm="ortho")
#         x = x.type(dtype)
#         return x + bias


# class Net(nn.Module):
#     def __init__(
#             self,
#             dim: int,
#             heads: int,
#             group_size: int,
#             num_group: int,
#             mult: int = 4,
#             dropout: float = 0.1, #0.2
#             num_experts: int = 12,
#             depth: int = 6

#     ):
#         super().__init__()

#         self.norm = nn.LayerNorm(dim)

#         self.flow_conv = nn.Sequential(nn.Conv1d(3, 64, kernel_size=1),
#                                        nn.BatchNorm1d(64),
#                                        nn.GELU(),
#                                        nn.Conv1d(64, 512, kernel_size=1),
#                                        nn.BatchNorm1d(512),
#                                        nn.GELU(),
#                                        nn.Conv1d(512, 1536, kernel_size=1), # 512
#                                        nn.BatchNorm1d(1536), #512
#                                        nn.GELU()
#                                        )

#         # self.propagation_0 = PointNetFeaturePropagation(in_channel=768 + 3,
#         #                                                 mlp_channels=[1024, 1208, 1536])
#         self.boundary_net = nn.Sequential(
#             nn.Linear(6, 64),
#             nn.GELU(),
#             nn.Linear(64, 256),
#             nn.GELU(),
#             nn.Linear(256, 64),
#             nn.GELU(),
#             nn.Linear(64, 2),
#         )

#         # self.to_out = nn.Sequential(
#         #     nn.Conv1d(3072, 1024, 1),
#         #     nn.BatchNorm1d(1024),
#         #     nn.GELU(),
#         #     nn.Dropout(0.1),
#         #     nn.Conv1d(1024, 1024, 1),
#         #     nn.BatchNorm1d(1024),
#         #     nn.GELU(),
#         #     nn.Dropout(0.1),
#         #     nn.Conv1d(1024, 1024, 1),
#         #     nn.BatchNorm1d(1024),
#         #     nn.GELU(),
#         #     nn.Conv1d(1024, 1, 1)
#         # )
        
#         self.to_out_new = nn.Sequential(
#                 nn.Conv1d(1536, 512, 1),
#                 nn.BatchNorm1d(512),
#                 nn.ReLU(),
#                 nn.Dropout(0.1),
#                 nn.Conv1d(512,256, 1),
#                 nn.BatchNorm1d(256),
#                 nn.ReLU(),
#                 nn.Dropout(0.1),
#                 nn.Conv1d(256, 64, 1),
#                 nn.BatchNorm1d(64),
#                 nn.ReLU(),
#                 nn.Conv1d(64, 1, 1)
#             )
        
        
#         #self.conv1 = nn.Conv1d(512, 64, kernel_size=1, bias=False) ### 512  to 3 channels 
#         self.conv1 = nn.Conv1d(3, 128, kernel_size=1, bias=False)  ### initial 128 channels
#         self.conv2 = nn.Conv1d(128, 128, kernel_size=1, bias=False)  ### initial 128 channels
#         self.bn1 = nn.BatchNorm1d(128)
#         self.bn2 = nn.BatchNorm1d(128)
#         self.gather_local_0 = Local_op(in_channels=256, out_channels=256) # [B, out_channels, npoint]
#         self.gather_local_1 = Local_op(in_channels=512, out_channels=512)
#         # self.pt_last = StackedAttention()
#         self.pt_last = Point_Transformer_Last()
#         self.feat_confuse = CrossAttention(1536)

#         self.relu = nn.ReLU()
#         self.conv_fuse = nn.Sequential(nn.Conv1d(2560, 512, kernel_size=1, bias=False),
#                                    nn.BatchNorm1d(512) ,#   不要归一化试试 5.29
#                                    nn.LeakyReLU(negative_slope=0.2))

#         # self.linear1 = nn.Linear(1024, 512, bias=False)
#         # self.bn6 = nn.BatchNorm1d(512)
#         # self.dp1 = nn.Dropout(p=0.5)
#         # self.linear2 = nn.Linear(512, 256)
#         # self.bn7 = nn.BatchNorm1d(256)
#         # self.dp2 = nn.Dropout(p=0.5)
 
#     def forward(self, pts):
#         B, N, C = pts.shape
        
#         ### flow feature extraction
#         flow_conditions = pts[:, 0:1, 3:6]  # B * 1 * 3
#         # print(f"After conv1: {pts.shape}")
#         flow_conditions = flow_conditions.transpose(2, 1)  # B * 3 * 1
#         flow_feature = self.flow_conv(flow_conditions)  # [B, 52, 1]  有归一化
#         #flow_feature = flow_feature.view(-1, 768, 1).repeat(1, 1, N)  # B*768*N
#         flow_feature = flow_feature.view(-1, 1536, 1).repeat(1, 1, N)  # B*512*N
#         heat_boundary = self.boundary_net(pts[:, 0:1, 0:6]).squeeze(dim=1)  # [B, 2]  ### boundary value
 
#         ### ########################################################
#         # pct-class  
#         x = pts[:, :, :6]   ### coordinate [B,N,3]
#         xyz = pts[:, :, :6]   ### coordinate [B,N,3]
#         x = x.permute(0, 2, 1)   ### transportation  [B,3,N] 
#         batch_size, _, _ = x.size()
#         x = self.relu(self.bn1(self.conv1(x))) # B, D, N ,64  [B,128,N]  ### 64 转换为64通道的
#         # print("x-conv1=", x.shape)    #[B, 128, N]  
#         x = self.relu(self.bn2(self.conv2(x))) # B, D, N, 64  [B,128,N]
#         x = x.permute(0, 2, 1) # [B, N ,128]
       
#         # ### sample and group   @Menghao
#         new_xyz, new_feature = sample_and_group(npoint=N, radius=0.15, nsample=64, xyz=xyz, points=x)
#         # print("new_xyz=", new_xyz.shape) #[8, 3496, 3]
#         #    new_xyz：[B, npoint, nsample, 3],new_feature: [B, npoint, nsample, 3+128=131] 
#         # print("new_feature=", new_feature.shape)    
#         feature_0 = self.gather_local_0(new_feature) #  [B, 256, N]
#         feature = feature_0.permute(0, 2, 1) #  [B, N, 256]
#         # new_xyz = torch.median(x, dim=2)[0]  # [B, npoint, 3] 计算中位数
#         new_xyz, new_feature = sample_and_group(npoint=N, radius=0.2, nsample=64, xyz=new_xyz, points=feature) 
#         feature_1 = self.gather_local_1(new_feature)  ### 256d local feature
#         # print("feature_1=", feature_1.shape) 
#         # x = self.pt_last(feature_1)# [B,4*channal,point] [B,4*512,N]  stack attention
#         ## add posizatio encode
#         x = self.pt_last(feature_1, new_xyz) 
#         x = torch.cat([x, feature_1], dim=1) # [B,5*256,N] [B,N,512+256]  
#         x = self.conv_fuse(x) #[B, 512,N]   L1 global feature 1280 维映射到1024维度，点特征
        
#         #### pool 
#         x_max = torch.max(x, 2)[0] # [B,1024]  池化
#         x_avg = torch.mean(x, 2)  # [B,1024]  
#         x_max_feature = x_max.view(B, -1).unsqueeze(-1).repeat(1, 1, N)  # [B, 512, N] 全局特征
#         x_avg_feature = x_avg.view(B, -1).unsqueeze(-1).repeat(1, 1, N)  # [B, 512, N]
        
#         # x_global_feature = torch.cat((x_max_feature, x_avg_feature, flow_feature), 1) # [B, 1024*3, N]
#         x_global_feature = torch.cat((x_max_feature, x_avg_feature), 1) # [B, 512*2, N]
#         # x_global_feature = x_max_feature # [B, 1024, N]
#         x1= torch.cat((x, x_global_feature), 1) # [B, 512*3, N] 1024*3
        
#         # print("feat_confusefeat_confuse",x1.shape)
#         # x_global_feature = torch.cat((x, flow_feature), 1) # [B, 1536+512, N] 
#         # x2= torch.cat((x1, flow_feature), 1)  # [B, 1024*2, N]
#         x2 = self.feat_confuse(x1, flow_feature)  ### [B, 1536*1, N]  特征融合
#         x_pre = self.to_out_new(x2)  # [B, 1, 1048] 解码
#         x_pre = x_pre.permute(0, 2, 1)  # [B, N, 1]
#         x_pre = x_pre.squeeze(dim=-1) # [B, N]
        
#         # min_val = heat_boundary[:, 0].unsqueeze(1)  # 扩展维度以支持广播，形状为[B, 1]
#         # max_val = heat_boundary[:, 1].unsqueeze(1)
#         min_val = torch.min(heat_boundary[:, 0].unsqueeze(1),heat_boundary[:, 1].unsqueeze(1))
#         max_val = torch.max(heat_boundary[:, 0].unsqueeze(1),heat_boundary[:, 1].unsqueeze(1))
#         x_denorm = x_pre * (max_val - min_val) + min_val
        
#         #x = torch.max(x, 2)[0] # [B, 1024]
#         #x = x.view(batch_size, -1) #[B, 1024]
#         # z_loss=0  ## newadd ldz 
#         # lb_loss=0 ## newadx_prex_pre
        
        
#         # return x_denorm, z_loss, lb_loss
#         return x_denorm
    

class Physics_Attention_Irregular_Mesh(nn.Module):
    """Slice-based attention over an irregular point set.

    Each of the N points is softly assigned (per head) to `slice_num`
    learned slice tokens, standard scaled dot-product attention runs among
    the slice tokens, and the results are scattered back to the points with
    the same assignment weights — so attention cost scales with the number
    of slices, not the number of points.
    """

    def __init__(self, dim, heads=8, dim_head=64, dropout=0., slice_num=64):
        super().__init__()
        inner_dim = dim_head * heads
        self.dim_head = dim_head
        self.heads = heads
        self.scale = dim_head ** -0.5  # 1/sqrt(d) attention scaling
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)
        # Learnable per-head temperature for the slice-assignment softmax.
        self.temperature = nn.Parameter(torch.ones([1, heads, 1, 1]) * 0.5)

        self.in_project_x = nn.Linear(dim, inner_dim)
        self.in_project_fx = nn.Linear(dim, inner_dim)
        self.in_project_slice = nn.Linear(dim_head, slice_num)
        # Orthogonal init keeps the initial slice assignments well spread.
        torch.nn.init.orthogonal_(self.in_project_slice.weight)
        self.to_q = nn.Linear(dim_head, dim_head, bias=False)
        self.to_k = nn.Linear(dim_head, dim_head, bias=False)
        self.to_v = nn.Linear(dim_head, dim_head, bias=False)
        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        )

    def forward(self, x):
        """x: [B, N, dim] -> [B, N, dim]."""
        B, N, C = x.shape
        H, D = self.heads, self.dim_head

        ### (1) Slice: softly pool N points into G slice tokens per head.
        fx_mid = self.in_project_fx(x).reshape(B, N, H, D).permute(0, 2, 1, 3).contiguous()  # B H N D
        x_mid = self.in_project_x(x).reshape(B, N, H, D).permute(0, 2, 1, 3).contiguous()    # B H N D
        slice_weights = self.softmax(self.in_project_slice(x_mid) / self.temperature)  # B H N G
        slice_norm = slice_weights.sum(2)  # B H G — soft count of points per slice
        slice_token = torch.einsum("bhnc,bhng->bhgc", fx_mid, slice_weights)
        # Normalise each slice token by its soft population (eps avoids /0).
        slice_token = slice_token / ((slice_norm + 1e-5)[:, :, :, None].repeat(1, 1, 1, D))

        ### (2) Scaled dot-product attention among the slice tokens.
        q = self.to_q(slice_token)
        k = self.to_k(slice_token)
        v = self.to_v(slice_token)
        attn = self.softmax(torch.matmul(q, k.transpose(-1, -2)) * self.scale)
        attn = self.dropout(attn)
        out_slice_token = torch.matmul(attn, v)  # B H G D

        ### (3) Deslice: scatter slice tokens back to the points.
        out_x = torch.einsum("bhgc,bhng->bhnc", out_slice_token, slice_weights)
        # Merge heads: (B, H, N, D) -> (B, N, H*D); equivalent to
        # einops.rearrange(out_x, 'b h n d -> b n (h d)').
        out_x = out_x.permute(0, 2, 1, 3).reshape(B, N, H * D)
        return self.to_out(out_x)

class PointNet(nn.Module):
    """Per-point feature encoder in the PointNet style.

    A first conv stack lifts each point to 256 channels; the channel-wise
    max over all points gives a global descriptor that is concatenated back
    onto every point; a second conv stack then maps the 512-channel result
    to `encoder_channel` channels.  Operates channels-first: [B, C, N].
    """

    def __init__(self, inchannel, encoder_channel):
        super(PointNet, self).__init__()
        self.encoder_channel = encoder_channel
        self.first_conv = nn.Sequential(
            nn.Conv1d(inchannel, 128, 1),
            nn.BatchNorm1d(128),
            nn.GELU(),
            nn.Conv1d(128, 256, 1)
        )
        self.second_conv = nn.Sequential(
            nn.Conv1d(512, 512, 1),
            nn.BatchNorm1d(512),
            nn.GELU(),
            nn.Conv1d(512, encoder_channel, 1)
        )

    def forward(self, point_groups):
        """Encode a batch of point sets.

        point_groups: [B, inchannel, N] (channels-first)
        returns:      [B, encoder_channel, N]
        """
        _, _, n_points = point_groups.shape

        # Per-point local features.
        local_feat = self.first_conv(point_groups)  # [B, 256, N]

        # Global descriptor: channel-wise max over points, broadcast back so
        # every point sees the whole set's context.
        global_feat = torch.max(local_feat, dim=2, keepdim=True)[0]       # [B, 256, 1]
        combined = torch.cat(
            [global_feat.expand(-1, -1, n_points), local_feat], dim=1)    # [B, 512, N]

        return self.second_conv(combined)  # [B, encoder_channel, N]


class MLP(nn.Module):
    """Input projection -> `n_layers` hidden blocks -> output projection.

    Each hidden block is Linear+activation; with `res=True` a residual
    connection wraps every hidden block.  `act` must be a key of the
    module-level ACTIVATION table, otherwise NotImplementedError is raised.
    """

    def __init__(self, n_input, n_hidden, n_output, n_layers=1, act='gelu', res=True):
        super(MLP, self).__init__()

        # Guard clause: resolve the activation factory or fail fast.
        if act not in ACTIVATION:
            raise NotImplementedError
        act = ACTIVATION[act]

        self.n_input = n_input
        self.n_hidden = n_hidden
        self.n_output = n_output
        self.n_layers = n_layers
        self.res = res  # whether hidden blocks get residual connections
        self.linear_pre = nn.Sequential(nn.Linear(n_input, n_hidden), act())
        self.linear_post = nn.Linear(n_hidden, n_output)
        self.linears = nn.ModuleList(
            nn.Sequential(nn.Linear(n_hidden, n_hidden), act())
            for _ in range(n_layers)
        )

    def forward(self, x):
        """x: [..., n_input] -> [..., n_output]."""
        h = self.linear_pre(x)
        for layer in self.linears:
            h = layer(h) + h if self.res else layer(h)
        return self.linear_post(h)

class Transolver_block(nn.Module):
    """Transformer encoder block built on Physics_Attention_Irregular_Mesh.

    Instead of the usual pre-norm residual update, the forward pass uses an
    "offset" variant: the attention output is subtracted from the input,
    re-projected through a linear layer + LayerNorm, gated with GELU, and
    added back before the MLP residual.  When `last_layer` is set, a final
    LayerNorm + Linear head maps the features to `out_dim`.

    NOTE(review): `self.bn_1` is constructed but never used in forward; it
    is kept as-is so existing checkpoints still load — confirm before removing.
    """

    def __init__(
            self,
            num_heads: int,
            hidden_dim: int,
            dropout: float,
            act='gelu',
            mlp_ratio=4,
            last_layer=False,
            out_dim=1,
            slice_num=16,
    ):
        super().__init__()
        self.last_layer = last_layer
        self.ln_1 = nn.LayerNorm(hidden_dim)
        self.Attn = Physics_Attention_Irregular_Mesh(
            hidden_dim, heads=num_heads, dim_head=hidden_dim // num_heads,
            dropout=dropout, slice_num=slice_num)
        self.LN2 = nn.LayerNorm(hidden_dim)
        self.mlp = MLP(hidden_dim, hidden_dim * mlp_ratio, hidden_dim,
                       n_layers=0, res=False, act=act)
        if self.last_layer:
            self.ln_3 = nn.LayerNorm(hidden_dim)
            self.mlp2 = nn.Linear(hidden_dim, out_dim)
        self.ln_4 = nn.Linear(hidden_dim, hidden_dim)
        self.gelu = nn.GELU()
        self.bn_1 = nn.BatchNorm1d(10000)  # unused in forward — see class note

    def forward(self, fx):
        """fx: [B, N, hidden_dim] -> same shape ([B, N, out_dim] if last layer)."""
        attn_out = self.Attn(self.ln_1(fx))
        # Offset-style residual: gate the (input - attention) offset, add input back.
        gated = self.gelu(self.LN2(self.ln_4(fx - attn_out))) + fx
        fx = self.mlp(gated) + fx
        if self.last_layer:
            return self.mlp2(self.ln_3(fx))
        return fx

# class Mlp(nn.Module):
#     def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
#         super().__init__()
#         out_features = out_features or in_features
#         hidden_features = hidden_features or in_features
#         self.fc1 = nn.Linear(in_features, hidden_features)
#         self.act = act_layer()
#         self.fc2 = nn.Linear(hidden_features, out_features)
#         self.drop = nn.Dropout(drop)

#     def forward(self, x):
#         x = self.fc1(x)
#         x = self.act(x)
#         x = self.drop(x)
#         x = self.fc2(x)
#         x = self.drop(x)
#         return x

# class AFNO_Block(nn.Module):
#     def __init__(self, dim, mlp_ratio=4., drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, h=14, w=8, use_fno=False, use_blocks=False):
#         super().__init__()
#         # args = get_args()
#         self.norm1 = norm_layer(dim)

#         # if "afno" == args.mixing_type:
#         #     self.filter = AdaptiveFourierNeuralOperator(dim, h=h, w=w)
#         # else:
#         #     raise NotImplementedError

#         self.filter = AFNO1D(hidden_size=256, num_blocks=8, sparsity_threshold=0.01, hard_thresholding_fraction=1, hidden_size_factor=1)
        

#         #to be added soon ... @John: pls double check
#         # if args.mixing_type == "afno":
#         #     self.filter = AFNO2D(hidden_size=args.hidden_size, num_blocks=args.fno_blocks, sparsity_threshold=0.01, hard_thresholding_fraction=1, hidden_size_factor=1)
#         #     #self.filter = AdaptiveFourierNeuralOperator(dim, h=h, w=w)
#         # elif args.mixing_type == "bfno":
#         #     self.filter = BFNO2D(hidden_size=768, num_blocks=8, hard_thresholding_fraction=1)
#         # elif args.mixing_type == "sa":
#         #     self.filter = SelfAttention(dim=768, h=14, w=8)
#         # if args.mixing_type == "gfn":
#         #     self.filter = GlobalFilter(dim=768, h=14, w=8)
#         # elif args.mixing_type == "ls":
#         #     self.filter = AttentionLS(dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., rpe=False, nglo=1, dp_rank=2, w=2)



#         self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
#         self.norm2 = norm_layer(dim)
#         mlp_hidden_dim = int(dim * mlp_ratio)
#         self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

#         self.double_skip = True

#     def forward(self, x):
#         residual = x
#         x = self.norm1(x)
#         x = self.filter(x)

#         if self.double_skip:
#             x = x + residual
#             residual = x

#         x = self.norm2(x)
#         x = self.mlp(x)
#         x = self.drop_path(x)
#         x = x + residual
#         return x

# class FeatureEmbedding(nn.Module):
#     def __init__(self, embed_dim=136, num_freqs=10):
#         super(FeatureEmbedding, self).__init__()
        
#         # 正弦嵌入频率
#         self.num_freqs = num_freqs
#         self.embed_dim = embed_dim
        
#         # 对 f1, f2 的线性映射
#         self.fc = nn.Linear(2, embed_dim // 2)
        
#     def positional_encoding(self, coords):
#         """
#         对 [X, Y, Z] 进行正弦嵌入
#         输入形状: [B, N, 3]
#         输出形状: [B, N, num_freqs * 6] (每个坐标映射到 num_freqs * 2)
#         """
#         B, N, _ = coords.shape
#         freqs = 2 ** torch.arange(self.num_freqs, dtype=torch.float32).to(coords.device)
#         # 广播操作，将频率应用到每个点云坐标
#         coords = coords[..., None]  # [B, N, 3, 1]
#         pos_enc = torch.cat([torch.sin(coords * freqs), torch.cos(coords * freqs)], dim=-1)
#         return pos_enc.view(B, N, -1)  # 展平成 [B, N, num_freqs * 6]
    
#     def forward(self, x):
#         """
#         输入形状: [B, N, 5] 
#         输出形状: [B, N, embedding_dim]
#         """
#         # 将输入拆分为坐标部分和属性部分
#         coords, features = x[..., :3], x[..., 3:]
        
#         # 对坐标部分进行正弦嵌入
#         coord_embed = self.positional_encoding(coords)  # [B, N, num_freqs * 6] 60
        
#         # 对属性部分进行全连接映射
#         B, N, _ = features.shape
#         features = features.view(B * N, -1)  # 展平成 [B * N, 2]
#         feature_embed = F.relu(self.fc(features))  # [B * N, embed_dim // 2]
#         feature_embed = feature_embed.view(B, N, -1)  # 恢复为 [B, N, embed_dim // 2] 32
        
#         # 融合嵌入
#         output = torch.cat([coord_embed, feature_embed], dim=-1)  # [B, N, embedding_dim]
#         return output

class phsoffNet(nn.Module):
    """End-to-end regression network over point clouds.

    Pipeline: PointNet per-point encoder -> cross attention with an MLP
    embedding of the `cubesize` conditioning -> stack of Transolver blocks
    (the last one projects to `out_dim`) -> mean over points, giving one
    scalar per batch element.
    """

    def __init__(
            self,
            dim: int,
            dropout: float = 0.2,
            n_layers=4,
            n_hidden=128,
            n_head=8,
            act='gelu',
            mlp_ratio=1,
            out_dim=1,
            slice_num=16,
    ):
        super().__init__()

        # Embeds the 3-d conditioning input into 128 channels.
        self.flow_net = nn.Sequential(
            nn.Linear(3, 64),
            nn.GELU(),
            nn.Linear(64, 128),
            nn.GELU(),
            nn.Linear(128, 512),
            nn.GELU(),
            nn.Linear(512, 128),
        )

        self.point_net = PointNet(inchannel=3, encoder_channel=128)
        self.cross_attention = CrossAttentionModule(embed_size=128, num_heads=8)
        # Only the final block carries the output head (last_layer=True).
        self.blocks = nn.ModuleList(
            Transolver_block(num_heads=n_head, hidden_dim=n_hidden,
                             dropout=dropout,
                             act=act,
                             mlp_ratio=mlp_ratio,
                             out_dim=out_dim,
                             slice_num=slice_num,
                             last_layer=(i == n_layers - 1))
            for i in range(n_layers)
        )

    def forward(self, pts, cubesize):
        """pts: [B, N, 3]; cubesize: [B, N, 3] -> one value per batch element, [B]."""
        point_feat = self.point_net(pts.permute(0, 2, 1))  # channels-first -> [B, 128, N]
        point_feat = point_feat.permute(0, 2, 1)           # back to [B, N, 128]
        flow_feat = self.flow_net(cubesize)                # [B, N, 128]

        fx = self.cross_attention(point_feat, flow_feat)
        for block in self.blocks:
            fx = block(fx)                                 # last block -> [B, N, out_dim]

        # Drop the trailing singleton channel, average over points, then
        # squeeze the kept dim: [B, N, 1] -> [B, N] -> [B, 1] -> [B].
        return fx.squeeze(dim=-1).mean(dim=1, keepdim=True).squeeze(dim=-1)
