import math
from io import BytesIO

import requests
import torch
import torchvision.transforms as T
from PIL import Image
from transformers import ViTImageProcessor, ViTModel

# Load the pretrained ViT backbone and its preprocessor.
# Fall back to CPU when CUDA is unavailable so the script still runs
# on machines without a GPU (original hard-coded "cuda").
device = "cuda" if torch.cuda.is_available() else "cpu"
model = ViTModel.from_pretrained("google/vit-base-patch16-224").eval().to(device)
processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")

# Load the input image (URL or local path).
image_path = "https://raw.githubusercontent.com/EliSchwartz/imagenet-sample-images/refs/heads/master/n02086240_Shih-Tzu.JPEG"  # same as your own input
if image_path.startswith("http"):
    # timeout so a hung download doesn't block the script forever
    image = Image.open(BytesIO(requests.get(image_path, timeout=30).content)).convert("RGB")
else:
    image = Image.open(image_path).convert("RGB")

# Resize with torchvision instead of the transformers processor to
# avoid the `reducing_gap` error inside transformers' image resizing.
transform = T.Compose([
    T.Resize((224, 224)),
    T.ToTensor(),
    T.Normalize(mean=processor.image_mean, std=processor.image_std),
])
image_tensor = transform(image).unsqueeze(0).to(device)  # [1, 3, 224, 224]
x = model.embeddings(image_tensor)  # shape [1, 197, 768]

# First Hugging Face encoder layer, used as the reference below.
layer = model.encoder.layer[0]

def efficient_attention_bias_x(x, WQ, WK, WV, bQ, bK, bV, num_heads):
    """Multi-head self-attention computed head by head via weight slicing.

    The (already transposed, [hidden, hidden]) projection matrices are cut
    column-wise into `num_heads` chunks; scaled dot-product attention runs
    independently on each chunk and the per-head results are concatenated
    back to full width. Equivalent to the usual reshape/transpose form.
    """
    hidden_dim = WQ.shape[0]
    dim_per_head = hidden_dim // num_heads
    scaling = math.sqrt(dim_per_head)
    print("[efficient_attention_bias_x] 输入x:", x.shape, x[0,:5])
    print("WQ:", WQ.shape, WQ.flatten()[:5])
    print("WK:", WK.shape, WK.flatten()[:5])
    print("WV:", WV.shape, WV.flatten()[:5])
    print("bQ:", bQ.shape, bQ[:5])
    print("bK:", bK.shape, bK[:5])
    print("bV:", bV.shape, bV[:5])
    per_head = []
    for h in range(num_heads):
        lo, hi = h * dim_per_head, (h + 1) * dim_per_head
        # Project the tokens using only this head's slice of the weights.
        Q = x @ WQ[:, lo:hi] + bQ[lo:hi]  # [N, head_dim]
        K = x @ WK[:, lo:hi] + bK[lo:hi]  # [N, head_dim]
        V = x @ WV[:, lo:hi] + bV[lo:hi]  # [N, head_dim]
        if h == 0:
            print(f"[head {h}] Q:", Q.shape, Q[0,:5])
            print(f"[head {h}] K:", K.shape, K[0,:5])
            print(f"[head {h}] V:", V.shape, V[0,:5])
        scores = (Q @ K.T) / scaling  # [N, N]
        if h == 0:
            print(f"[head {h}] scores:", scores.shape, scores[0,:5])
        attn = torch.softmax(scores, dim=-1)
        if h == 0:
            print(f"[head {h}] attn:", attn.shape, attn[0,:5])
        head_out = attn @ V  # [N, head_dim]
        if h == 0:
            print(f"[head {h}] output:", head_out.shape, head_out[0,:5])
        per_head.append(head_out)
    # Concatenate heads back to [N, hidden_dim].
    out = torch.cat(per_head, dim=-1)
    print("[efficient_attention_bias_x] 输出:", out.shape, out[0,:5])
    return out

def par(x, layer):
    """Run one ViT encoder layer using the per-head attention above,
    printing every intermediate so it can be diffed against the HF path.
    """
    print("[par] 输入x:", x.shape, x[0,:5])
    normed = layer.layernorm_before(x)
    print("[par] layernorm_before:", normed.shape, normed[0,:5])
    self_attn = layer.attention.attention
    # nn.Linear stores weight as [out, in]; transpose for the x @ W form.
    attn_out = efficient_attention_bias_x(
        normed,
        self_attn.query.weight.T,
        self_attn.key.weight.T,
        self_attn.value.weight.T,
        self_attn.query.bias,
        self_attn.key.bias,
        self_attn.value.bias,
        self_attn.num_attention_heads,
    )
    # HF's ViT keeps the attention output projection in attention.output.
    out_mod = layer.attention.output
    proj = out_mod.dense(attn_out)
    print("[par] proj:", proj.shape, proj[0,:5])
    if hasattr(out_mod, 'dropout'):
        proj = out_mod.dropout(proj)
    # First residual connection (around attention).
    attn_out = proj + x
    print("[par] res1 (attn残差):", attn_out.shape, attn_out[0,:5])
    post_ln = layer.layernorm_after(attn_out)
    print("[par] layernorm_after:", post_ln.shape, post_ln[0,:5])
    hidden = layer.intermediate.dense(post_ln)
    print("[par] intermediate.dense+relu:", hidden.shape, hidden[0,:5])
    if hasattr(layer.intermediate, 'intermediate_act_fn'):
        hidden = layer.intermediate.intermediate_act_fn(hidden)
        print("[par] intermediate_act_fn:", hidden.shape, hidden[0,:5])
    mlp_out = layer.output.dense(hidden)
    print("[par] output.dense:", mlp_out.shape, mlp_out[0,:5])
    if hasattr(layer.output, 'dropout'):
        mlp_out = layer.output.dropout(mlp_out)
    # Second residual connection (around the MLP).
    result = attn_out + mlp_out
    print("[par] 最终输出:", result.shape, result[0,:5])
    return result

# Custom per-head attention output.
# no_grad: we only compare forward activations, so skip building the
# autograd graph the original call accumulated for no reason.
with torch.no_grad():
    custom_out = par(x.squeeze(0), layer)
#print(custom_out)
# Reference: replicate one Hugging Face ViT layer step by step.
with torch.no_grad():
    # --- detailed printout of the official ViT layer forward ---
    ref_x = x.clone().squeeze(0) # [197, 768]
    print("[HF] 输入x:", ref_x.shape, ref_x[0, :5])
    ref_norm1 = layer.layernorm_before(ref_x)
    # sa_module = layer.attention
    # proj = sa_module(ref_norm1)[0]

    print("[HF] layernorm_before:", ref_norm1.shape, ref_norm1[0, :5])
    sa = layer.attention.attention
    num_heads = sa.num_attention_heads
    # nn.Linear weight is [out, in]; transpose for the x @ W form.
    WQ, WK, WV = sa.query.weight.T, sa.key.weight.T, sa.value.weight.T
    bQ, bK, bV = sa.query.bias, sa.key.bias, sa.value.bias
    # Official attention projections.
    Q = ref_norm1 @ WQ + bQ  # [197, 768]
    K = ref_norm1 @ WK + bK
    V = ref_norm1 @ WV + bV
    print("[HF] Q:", Q.shape, Q[0, :5])
    print("[HF] K:", K.shape, K[0, :5])
    print("[HF] V:", V.shape, V[0, :5])
    head_dim = Q.shape[-1] // num_heads
    scale = math.sqrt(head_dim)
    # Reshape: [seq_len, hidden] -> [seq_len, heads, head_dim] -> [heads, seq_len, head_dim]
    Q_ = Q.view(Q.shape[0], num_heads, head_dim).transpose(0, 1)
    K_ = K.view(K.shape[0], num_heads, head_dim).transpose(0, 1)
    V_ = V.view(V.shape[0], num_heads, head_dim).transpose(0, 1)
    scores = torch.matmul(Q_, K_.transpose(1, 2)) / scale  # [num_heads, seq_len, seq_len]
    print("[HF] scores:", scores.shape, scores[0, 0, :5])
    attn = torch.softmax(scores, dim=-1)
    print("[HF] attn:", attn.shape, attn[0, 0, :5])
    attn_out = torch.matmul(attn, V_)  # [num_heads, seq_len, head_dim]
    attn_out = attn_out.transpose(0, 1).reshape(Q.shape[0], -1)  # [seq_len, hidden_dim]
    print("[HF] attn_out:", attn_out.shape, attn_out[0, :5])
    proj_layer = layer.attention.output
    proj = proj_layer.dense(attn_out)
    print("[HF] proj:", proj.shape, proj[0, :5])
    # Apply dropout exactly like `par` does (identity in eval mode) so
    # the two paths stay step-for-step comparable even outside eval.
    if hasattr(proj_layer, 'dropout'):
        proj = proj_layer.dropout(proj)

    attn_out = proj + ref_x
    print("[HF] res1 (attn残差):", attn_out.shape, attn_out[0, :5])
    res1 = layer.layernorm_after(attn_out)
    print("[HF] layernorm_after:", res1.shape, res1[0, :5])
    inter = layer.intermediate.dense(res1)
    print("[HF] intermediate.dense+relu:", inter.shape, inter[0, :5])
    if hasattr(layer.intermediate, 'intermediate_act_fn'):
        inter = layer.intermediate.intermediate_act_fn(inter)
        print("[HF] intermediate_act_fn:", inter.shape, inter[0, :5])
    ffn = layer.output.dense(inter)
    print("[HF] output.dense:", ffn.shape, ffn[0, :5])
    # Same dropout symmetry as above for the MLP branch.
    if hasattr(layer.output, 'dropout'):
        ffn = layer.output.dropout(ffn)

    output = attn_out + ffn
    print("[HF] 最终输出:", output.shape, output[0, :5])
    # Full official layer for the final cross-check; returns a tuple.
    ref_out = layer(x)
print(ref_out)

# Rebind `layer` to the full encoder stack (a ModuleList) for the
# disabled whole-encoder experiment below.
# NOTE(review): `par` expects a single encoder layer; passing the whole
# ModuleList as-is would fail — iterate over the layers instead. Confirm
# before re-enabling the commented code.
layer = model.encoder.layer

# custom_out = par(x.squeeze(0), layer)
# print(custom_out)

# with torch.no_grad():
#     ref_out = model.encoder(x)
# print(ref_out)
