import torch
import torch.nn as nn
from torch_scatter import scatter_mean, scatter_add
from einops import rearrange

# Fix the global RNG seed so runs are reproducible.
torch.manual_seed(42)

class Physics_Attention_1D(nn.Module):
    """Physics attention over 1-D point sets via learnable "slice" tokens.

    Each point is softly assigned (sliced) to `slice_num` tokens per head,
    standard scaled dot-product attention runs among the slice tokens, and
    the attended tokens are redistributed back onto the points (desliced).

    Two equivalent forward paths are provided:
      * ``origin_forward``: dense input ``[B, N, C]``.
      * ``graph_forward``:  flattened input ``[M, C]`` plus a per-node
        graph-id vector (PyG-style batching).
    """

    def __init__(self, dim, heads=8, dim_head=64, dropout=0.0, slice_num=64):
        super().__init__()
        inner_dim = dim_head * heads
        # Stored so reshapes never hard-code the head width (the old
        # graph_forward used a literal 64, which broke for other sizes).
        self.inner_dim = inner_dim
        self.dim_head = dim_head
        self.heads = heads
        self.scale = dim_head**-0.5
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)

        # Learnable softmax temperatures for the slice assignment; the dense
        # path uses `temperature`, the graph path `graph_temperature`.  Both
        # start at 0.5 so the two paths agree at initialization.
        self.temperature = nn.Parameter(torch.ones([1, heads, 1, 1]) * 0.5)
        self.graph_temperature = nn.Parameter(torch.ones([1, heads, 1]) * 0.5)

        self.in_project_x = nn.Linear(dim, inner_dim)
        self.in_project_fx = nn.Linear(dim, inner_dim)
        self.in_project_slice = nn.Linear(dim_head, slice_num)
        # Principled initialization for the slicing projection.
        torch.nn.init.orthogonal_(self.in_project_slice.weight)
        self.to_q = nn.Linear(dim_head, dim_head, bias=False)
        self.to_k = nn.Linear(dim_head, dim_head, bias=False)
        self.to_v = nn.Linear(dim_head, dim_head, bias=False)
        self.to_out = nn.Sequential(nn.Linear(inner_dim, dim), nn.Dropout(dropout))

    def origin_forward(self, x):
        """Dense forward pass.

        Args:
            x: ``[B, N, C]`` batch of point features.

        Returns:
            out:           ``[B*N, C]`` desliced, output-projected features.
            slice_token_1: ``[B, H, G, D]`` slice tokens before attention.
            slice_token_2: ``[B, H, G, D]`` slice tokens after attention.
            out_graph:     ``[B*N, H, D]`` desliced per-head features,
                           flattened node-major to mirror graph_forward.
        """
        B, N, C = x.shape

        ### (1) Slice: softly assign every point to G slice tokens per head.
        fx_mid = (
            self.in_project_fx(x)
            .reshape(B, N, self.heads, self.dim_head)
            .permute(0, 2, 1, 3)
            .contiguous()
        )  # B H N D
        x_mid = (
            self.in_project_x(x)
            .reshape(B, N, self.heads, self.dim_head)
            .permute(0, 2, 1, 3)
            .contiguous()
        )  # B H N D
        slice_weights = self.softmax(
            self.in_project_slice(x_mid) / self.temperature
        )  # B H N G
        slice_norm = slice_weights.sum(2)  # B H G: total weight per slice
        slice_token = torch.einsum("bhnc,bhng->bhgc", fx_mid, slice_weights)
        # Normalize each slice token by its total assignment weight;
        # broadcasting replaces the old explicit .repeat over dim_head.
        slice_token = slice_token / (slice_norm.unsqueeze(-1) + 1e-5)
        slice_token_1 = slice_token.clone()

        ### (2) Attention among slice tokens (scaled dot-product).
        q_slice_token = self.to_q(slice_token)
        k_slice_token = self.to_k(slice_token)
        v_slice_token = self.to_v(slice_token)
        dots = torch.matmul(q_slice_token, k_slice_token.transpose(-1, -2)) * self.scale
        attn = self.softmax(dots)
        out_slice_token = torch.matmul(attn, v_slice_token)  # B H G D
        slice_token_2 = out_slice_token.clone()

        ### (3) Deslice: redistribute the attended tokens onto the points.
        out_x = torch.einsum("bhgc,bhng->bhnc", out_slice_token, slice_weights)
        # Node-major per-head view [B*N, H, D]; a reshape of the permuted
        # tensor is equivalent to concatenating the per-sample slices.
        out_x_o_to_graph = out_x.permute(0, 2, 1, 3).reshape(
            B * N, self.heads, self.dim_head
        )
        # "b h n d -> b n (h d)", then flatten batch: [B*N, H*D].
        out_flat = out_x.permute(0, 2, 1, 3).reshape(B * N, self.inner_dim)

        return self.to_out(out_flat), slice_token_1, slice_token_2, out_x_o_to_graph

    def graph_forward(self, x, batch):
        """Flattened ("graph") forward pass, equivalent to origin_forward.

        Args:
            x:     ``[M, C]`` node features, M = total number of points.
            batch: ``[M]`` (a trailing singleton dim is tolerated) graph
                   index of each node, integer values in ``[0, B)``.

        Returns:
            out:           ``[M, C]`` desliced, output-projected features.
            slice_token_1: ``[B, H, G, D]`` slice tokens before attention.
            slice_token_2: ``[B, H, G, D]`` slice tokens after attention.
            out_x_g:       ``[M, H, D]`` desliced per-head features.
        """
        total_nodes = x.size(0)
        # Normalize to 1-D once, instead of squeezing ad hoc at use sites.
        batch = batch.reshape(-1)
        batch_size = int(batch.max().item()) + 1

        ### (1) Slice (per node, then segment-summed per graph).
        fx_mid = self.in_project_fx(x).view(total_nodes, self.heads, self.dim_head)  # [M, H, D]
        x_mid = self.in_project_x(x).view(total_nodes, self.heads, self.dim_head)  # [M, H, D]

        slice_weights = self.softmax(
            self.in_project_slice(x_mid) / self.graph_temperature
        )  # [M, H, G]
        slice_num = slice_weights.size(-1)
        # Segment sums via core torch's index_add_ (equivalent to
        # torch_scatter.scatter_add along dim 0 with a broadcast index).
        slice_norm = slice_weights.new_zeros(
            batch_size, self.heads, slice_num
        ).index_add_(0, batch, slice_weights)  # [B, H, G]

        weighted = fx_mid.unsqueeze(-2) * slice_weights.unsqueeze(-1)  # [M, H, G, D]
        slice_token = weighted.new_zeros(
            batch_size, self.heads, slice_num, self.dim_head
        ).index_add_(0, batch, weighted)  # [B, H, G, D]
        slice_token = slice_token / (slice_norm.unsqueeze(-1) + 1e-5)
        slice_token_1 = slice_token.clone()

        ### (2) Attention among slice tokens (scaled dot-product).
        q_slice_token = self.to_q(slice_token)
        k_slice_token = self.to_k(slice_token)
        v_slice_token = self.to_v(slice_token)
        dots = torch.matmul(q_slice_token, k_slice_token.transpose(-1, -2)) * self.scale
        attn = self.softmax(dots)
        out_slice_token = torch.matmul(attn, v_slice_token)  # B H G D
        slice_token_2 = out_slice_token.clone()

        ### (3) Deslice: gather each node's graph tokens, mix by its weights.
        out_slice_token_expanded = out_slice_token[batch]  # [M, H, G, D]
        out_x_g = torch.sum(
            out_slice_token_expanded * slice_weights.unsqueeze(-1),
            dim=-2,
        )  # [M, H, D]
        # Flatten heads with inner_dim instead of a hard-coded 64, so the
        # layer works for any heads/dim_head combination (bug fix).
        out_flat = out_x_g.reshape(total_nodes, self.inner_dim)

        return self.to_out(out_flat), slice_token_1, slice_token_2, out_x_g

def test_compare_attention_models():
    """Compare origin_forward and graph_forward on identical data/weights.

    Prints the maximum element-wise difference between the two paths'
    desliced per-head outputs and reports whether it is within tolerance.
    """
    # Model hyper-parameters.
    dim = 64
    heads = 4
    dim_head = 16
    dropout = 0
    slice_num = 32

    # One model instance: both forward paths share the same weights.
    original_model = Physics_Attention_1D(dim, heads, dim_head, dropout, slice_num)

    # Test data: 3 graphs with 4 points each.  The graph path consumes the
    # flattened points plus a per-point graph-id vector.
    B, N = 3, 4
    x = torch.randn(B, N, dim)  # [3, 4, 64]
    batch = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
    x_graph = x.reshape(-1, dim)  # [12, 64]

    with torch.no_grad():
        # Dense path.
        output_original, tk1_org, tk2_org, out_x_o_to_graph = original_model.origin_forward(x)
        # Graph path on the same data.
        output_graph, tk1_graph, tk2_graph, out_x_g = original_model.graph_forward(x_graph, batch)

    # Reshape the graph output to the dense layout for the report below.
    output_graph_reshaped = output_graph.reshape(B, N, -1)

    # Largest element-wise deviation between the two deslice outputs.
    max_diff = torch.max(torch.abs(out_x_o_to_graph - out_x_g))
    print(f"Maximum difference between outputs: {max_diff.item()}")

    assert out_x_o_to_graph.shape == out_x_g.shape, "Output shapes do not match"

    # Tolerance threshold for "identical up to float noise".
    tolerance = 1e-5
    if max_diff < tolerance:
        print("Test passed: Outputs are nearly identical within tolerance")
    else:
        print("Test failed: Outputs differ significantly")

    # Extra diagnostics.
    print(f"Original model output shape: {output_original.shape}")
    print(f"Graph model output shape (reshaped): {output_graph_reshaped.shape}")

# Run the comparison only when executed as a script, not on import.
if __name__ == "__main__":
    test_compare_attention_models()