from mpmath.libmp import prec_to_dps
import torch
import torch.nn as nn

def HMA_Tensor_Parallelism(A, WQ, WK, WV, num_heads=4):
    """Multi-head self-attention over this device's shard of the heads.

    Args:
        A: (T, D) input activations (T: seq_len, D: embed_dim) — note the
            input is 2-D, there is no batch dimension here.
        WQ, WK, WV: (D, num_heads * head_dim) projection weights for this
            device's heads, concatenated along the column axis.
        num_heads: number of heads packed into WQ/WK/WV. Defaults to 4,
            matching the original hard-coded per-device split.

    Returns:
        (T, num_heads * head_dim) concatenation of per-head attention outputs.
    """
    T, D = A.shape
    # Derive the per-head width from the weight shard instead of hard-coding
    # 16, so the function works for any embed_dim / head-count combination.
    # The softmax scaling below then also tracks the true head dimension.
    head_dim = WQ.shape[1] // num_heads
    outputs = []
    for i in range(num_heads):
        cols = slice(i * head_dim, (i + 1) * head_dim)
        Q = torch.matmul(A, WQ[:, cols])
        K = torch.matmul(A, WK[:, cols])
        V = torch.matmul(A, WV[:, cols])
        # Scaled dot-product attention for this head.
        S = torch.matmul(Q, K.transpose(-2, -1)) / (head_dim ** 0.5)
        attn = torch.softmax(S, dim=-1)
        outputs.append(torch.matmul(attn, V))

    return torch.cat(outputs, dim=-1)

def HMA_Tensor_Parallelism_Normalization(B, WB, BB = 0):
    """Attention output projection for one shard: ``B @ WB + BB``.

    BB defaults to 0 so that, when per-device partial results are summed,
    the bias can be contributed by a single device only.
    """
    return torch.matmul(B, WB) + BB

def MLP_Tensor_Parallelism_GEMM(D, WD, BD = 0):
    """First MLP GEMM on this device's column shard: ``D @ WD + BD``.

    BD defaults to 0 so a shard without a bias slice can skip it.
    """
    partial = torch.matmul(D, WD)
    return partial + BD

def MLP_Tensor_Parallelism_GELU(E, WE, BE = 0):
    """Second MLP GEMM (row-parallel): ``E @ WE + BE``.

    NOTE: despite the name, no GELU is applied here — the caller applies the
    activation before invoking this projection. BE defaults to 0 so the bias
    is added by one device only when partial results are summed.
    """
    projected = torch.matmul(E, WE)
    return projected + BE

# Assume two devices: split the attention heads (and MLP hidden units)
# into two shards, one per simulated device, and check the result against
# a reference nn.TransformerEncoderLayer forward pass.
embed_dim = 128
num_heads = 8
head_dim = embed_dim // num_heads  # 16 per head; 4 heads per device
device_count = 2
seq_len = 100
mlp_ratio = 4
dropout = 0.00  # dropout disabled so both paths are deterministic and comparable

input_tensor = torch.randn(1, seq_len, embed_dim)
# Create a standard Transformer layer as the reference implementation

layer = nn.TransformerEncoderLayer(
    d_model=embed_dim,
    nhead=num_heads,
    dim_feedforward=embed_dim * mlp_ratio,
    dropout=dropout,
    batch_first=True,
    norm_first=True
)
# Compute the reference output of the standard Transformer layer,
# step by step so intermediates can be compared against the parallel path
with torch.no_grad():
    # 1. Input layer normalization and self-attention
    x = layer.norm1(input_tensor)
    #print("x", x) # matches the parallel path at this point
    ref_attn_output = layer.self_attn(x, x, x)[0]
    #print("ref_attn_output", ref_attn_output) # matches the parallel path at this point
    ref_attn_output = layer.dropout1(ref_attn_output)
    ref_add1 = input_tensor + ref_attn_output
    #print(f"ref_add1:{ref_add1} ) ") # matches the parallel path at this point
    # 2. MLP block computation
    x = layer.norm2(ref_add1)
    gelu = layer.activation(layer.linear1(x))
    # print("gelu", gelu) # matches the parallel path at this point
    d_gelu = layer.dropout(gelu)
    # print("d_gelu", d_gelu) # matches the parallel path at this point
    ref_mlp = layer.linear2(d_gelu)
    # print("ref_mlp", ref_mlp)
    # print("ref_mlp size", ref_mlp.size())
    ref = ref_add1 + layer.dropout2(ref_mlp)
    print("ref", ref)
    # Output of the full model (should match the step-by-step version)
    #ref_model = layer(input_tensor)
    #print("full ref", ref_model)


# Build the hybrid-parallel Transformer layer from the reference weights
# Fetch the fused QKV weights
WQKV = layer.self_attn.in_proj_weight   # (3*D, D)
BQKV = layer.self_attn.in_proj_bias     # (3*D)
WB = layer.self_attn.out_proj.weight     # (D, D)
BB = layer.self_attn.out_proj.bias       # (D)
# print(f'WQKV:{WQKV.size()}')
WQ, WK, WV = torch.chunk(WQKV, 3, dim=0) # (D, D)
# print(f'WQ:{WQ.size()} WK:{WK.size()} WV:{WV.size()}')

# nn.Linear stores weight as (out_features, in_features); transpose so the
# activations can right-multiply the weights below.
WQ = WQ.T
WK = WK.T
WV = WV.T
WB = WB.T

# Split Q/K/V column-wise into two shards (one per device)
WQ1, WQ2 = torch.chunk(WQ, 2, dim=1)
WK1, WK2 = torch.chunk(WK, 2, dim=1)
WV1, WV2 = torch.chunk(WV, 2, dim=1)

# Row-split the output projection to match the column-split attention
WB1, WB2 = torch.chunk(WB, 2, dim=0)
BB1, BB2 = torch.chunk(BB, 2, dim=0)

# Parallel MLP (forward GEMM + GELU + Dropout + backward GEMM)
# linear1: (dim_ff, embed_dim)
WD = layer.linear1.weight  # (512, 128)
BD = layer.linear1.bias    # (512)
WE = layer.linear2.weight  # (128, 512)
#print(f'WD:{WD.size()} BD:{BD.size()} WE:{WE.size()}')
BE = layer.linear2.bias    # (128)


# Column-split linear1 (with its bias); row-split linear2. linear2's bias BE
# is deliberately NOT split — it is added by one device only (see below).
WD1, WD2 = torch.chunk(WD.T, 2, dim=1)
BD1, BD2 = torch.chunk(BD, 2, dim=0)
WE1, WE2 = torch.chunk(WE.T, 2, dim=0)
#BE1, BE2 = torch.chunk(BE, 2, dim=0)


x = input_tensor

# Sequence-split the input so each "device" normalizes its own half
u1, u2 = torch.chunk(input_tensor.squeeze(0), device_count, dim=0)
# print(f'u1:{u1.size()} u2:{u2.size()}')
x1 = layer.norm1(u1)
x2 = layer.norm1(u2)

# Re-gather the full sequence: attention needs every position
x = torch.cat((x1,x2),dim=0)

# Each device attends with its own shard of the heads; the output-projection
# bias BB is contributed by device 1 only (device 2 uses the default 0).
y1 = HMA_Tensor_Parallelism(x, WQ1, WK1, WV1)
c1 = HMA_Tensor_Parallelism_Normalization(y1,WB1,BB)
y2 = HMA_Tensor_Parallelism(x, WQ2, WK2, WV2)
c2 = HMA_Tensor_Parallelism_Normalization(y2,WB2)

c = c1 + c2  # sum of per-device partials (stands in for an all-reduce)
# print(f'c:{c},and c.size:{c.size()}')
c1, c2 = torch.chunk(c, device_count, dim=0)

c1 = layer.dropout1(c1)
c2 = layer.dropout1(c2)

# First residual connection, computed per sequence shard
g1 = c1 + u1
g2 = c2 + u2

# h = torch.cat((g1,g2),dim=0)
# h = layer.norm2(h)
# h1, h2 = torch.chunk(h, device_count, dim=0)
h1 = layer.norm2(g1)
h2 = layer.norm2(g2)
h = torch.cat((h1,h2),dim=0)

# Column-parallel first GEMM: each device computes half the hidden units
e1 = MLP_Tensor_Parallelism_GEMM(h,WD1,BD1)
e2 = MLP_Tensor_Parallelism_GEMM(h,WD2,BD2)
# e = torch.cat((e1,e2),dim=1)
e1 = layer.activation(e1)
e1 = layer.dropout(e1)
e2 = layer.activation(e2)
e2 = layer.dropout(e2)
# Row-parallel second GEMM; bias BE added on device 1 only
f1 = MLP_Tensor_Parallelism_GELU(e1,WE1,BE)
f2 = MLP_Tensor_Parallelism_GELU(e2,WE2)
f = f1 + f2  # sum of per-device partials (stands in for an all-reduce)

print(f'f:{f}')
# print(f'f.size:{f.size()}')
f1, f2 = torch.chunk(f, device_count, dim=0)

f1 = layer.dropout2(f1)
f2 = layer.dropout2(f2)

# Second residual connection
out1 = f1+g1
out2 = f2+g2

out = torch.cat((out1,out2),dim=0)
print(out)
print()
# Print max difference for debugging

# NOTE(review): out is (seq_len, D) while ref is (1, seq_len, D);
# broadcasting makes the elementwise difference valid.
max_diff = (out - ref).abs().max().item()
print(f"Maximum absolute difference: {max_diff}")



