from re import A
import torch
import torch.nn as nn
import math

# def efficient_attention(xp, x, WQ, WK, WV, FH):
#     N = x.shape[0]
#     P = xp.shape[0]
#     F = WQ.shape[0]  # input feature dimension
#     # Automatically decide whether to use the optimized path (Theorem 2)
#     threshold = (F - FH) / (F * FH)
#     decision_metric = 1 / P - 1 / N
#     use_optimized = decision_metric > threshold

#     Qp = xp @ WQ
#     K = x @ WK

#     if not use_optimized:
#         K = x @ WK
#         V = x @ WV

#         scores = torch.matmul(Qp, K.T)/ (FH ** 0.5)
#         scores = torch.clamp(scores, min=-30.0, max=30.0)

#         attn = torch.softmax(Qp @ K.T / (FH ** 0.5), dim=-1)

#         return attn @ V
#     else:
#         S = torch.softmax((Qp @ WK.T) @ x.T / (FH ** 0.5), dim=-1)

#         return (S @ x) @ WV
def efficient_attention_bias(xp, x, WQ, WK, WV, bQ, bK, bV, num_heads):
    """
    ViT/BERT-style multi-head self-attention with an optional reordered
    matmul contraction (Theorem 2) for small partitions.

    Parameters:
        xp: local partitioned input [P, F]
        x: full input [N, F]
        WQ, WK, WV: projection weights [F, F]
        bQ, bK, bV: projection biases [F]
        num_heads: number of attention heads (must divide F)

    Returns:
        output: attended result [P, F]
    """
    F = WQ.shape[0]  # hidden dim
    head_dim = F // num_heads
    scale = math.sqrt(head_dim)

    N = x.shape[0]
    P = xp.shape[0]

    # Theorem 2 decision: reordering the contraction only pays off when the
    # partition P is small enough relative to the full sequence N.
    threshold = (F - head_dim) / (F * head_dim)
    decision_metric = 1 / P - 1 / N
    use_optimized = decision_metric > threshold
    print(f"[Optim] Use optimized path? {use_optimized} (threshold: {threshold:.4f}, metric: {decision_metric:.4f})")

    outputs = []
    for h in range(num_heads):
        # Slice weights and biases for head h
        WQ_h = WQ[:, h * head_dim:(h + 1) * head_dim]
        WK_h = WK[:, h * head_dim:(h + 1) * head_dim]
        WV_h = WV[:, h * head_dim:(h + 1) * head_dim]

        bQ_h = bQ[h * head_dim:(h + 1) * head_dim]
        bK_h = bK[h * head_dim:(h + 1) * head_dim]
        bV_h = bV[h * head_dim:(h + 1) * head_dim]

        if use_optimized:
            # Reordered contraction: (Q @ WK_h.T) @ x.T instead of Q @ (x @ WK_h).T.
            # The key bias contributes only a per-row constant (Q @ bK_h) to every
            # score in a row, which cancels inside the row-wise softmax, so it is
            # correctly omitted here.  (The previous code added bK_h to a [P, F]
            # tensor — a broadcasting shape error whenever num_heads > 1.)
            Q = xp @ WQ_h + bQ_h                               # [P, FH]
            S = torch.softmax((Q @ WK_h.T) @ x.T / scale, dim=-1)  # [P, N]
            V = x @ WV_h + bV_h                                # [N, FH]
            output = S @ V                                     # [P, FH]
        else:
            Q = xp @ WQ_h + bQ_h  # [P, FH]
            K = x @ WK_h + bK_h   # [N, FH]
            V = x @ WV_h + bV_h   # [N, FH]
            scores = torch.matmul(Q, K.T) / scale  # [P, N]
            attn = torch.softmax(scores, dim=-1)
            output = attn @ V  # [P, FH]

        outputs.append(output)

    return torch.cat(outputs, dim=-1)  # [P, F]

def efficient_attention(xp, x, WQ, WK, WV, num_heads):
    """Multi-head self-attention of a partition xp [P, F] over the full
    input x [N, F], choosing the matmul contraction order per Theorem 2.

    WQ, WK, WV are [F, F] projection weights; the result is [P, F].
    """
    hidden = WQ.shape[0]  # hidden dim F
    fh = hidden // num_heads
    scale = math.sqrt(fh)
    n_rows, p_rows = x.shape[0], xp.shape[0]

    # Theorem 2: the reordered contraction (Q WK^T) x^T beats the standard
    # Q (x WK)^T only when the partition is small relative to the sequence.
    threshold = (hidden - fh) / (hidden * fh)
    decision_metric = 1 / p_rows - 1 / n_rows
    use_optimized = decision_metric > threshold
    print(f"[Optim] Use optimized path? {use_optimized} (threshold: {threshold:.4f}, metric: {decision_metric:.4f})")

    per_head = []
    for h in range(num_heads):
        lo, hi = h * fh, (h + 1) * fh
        wq, wk, wv = WQ[:, lo:hi], WK[:, lo:hi], WV[:, lo:hi]
        q = xp @ wq  # [P, FH]
        if use_optimized:
            # Eq(8): contract with WK^T first, then with the full input.
            logits = (q @ wk.T) @ x.T / scale          # [P, N]
            head_out = torch.softmax(logits, dim=-1) @ (x @ wv)
        else:
            # Eq(3): materialize K and V, then the usual QK^T softmax.
            k = x @ wk  # [N, FH]
            v = x @ wv  # [N, FH]
            head_out = torch.softmax(q @ k.T / scale, dim=-1) @ v
        per_head.append(head_out)

    # Concatenate all heads -> [P, F]
    return torch.cat(per_head, dim=-1)

def partitioned_layer(x, start, end, layer):
    """
    Run one transformer encoder layer on the partition x[start:end], with the
    partition's queries attending over the full sequence x.

    Parameters:
        x: full input hidden states [N, F]
        start, end: row range selecting the local partition
        layer: a HuggingFace-style encoder layer (BERT or ViT flavour) —
               assumed to expose the attribute layout used below; confirm
               against the actual model classes.

    Returns:
        output: layer output for the partition rows [end - start, F]
    """
    xp = x[start:end]
    device = xp.device
    layer = layer.to(device)
    if hasattr(layer.attention, 'self'):
        # BERT-style layer (post-layernorm); attention weights at attention.self.
        sa = layer.attention.self

        # Selectively optimized attention over the full sequence.
        context = efficient_attention(
            xp, x,
            sa.query.weight.T,
            sa.key.weight.T,
            sa.value.weight.T,
            sa.num_attention_heads,
        )

        # Output projection, residual + LayerNorm, then the FFN.
        context = layer.attention.output.dense(context)
        context = layer.attention.output.LayerNorm(context + xp)
        inter = layer.intermediate.dense(context)
        # Use the layer's configured activation (BERT defaults to GELU);
        # the previous hard-coded .relu() silently altered the model's math.
        if hasattr(layer.intermediate, 'intermediate_act_fn'):
            inter = layer.intermediate.intermediate_act_fn(inter)
        else:
            inter = inter.relu()
        output = layer.output.dense(inter)
        output = layer.output.LayerNorm(output + context)
        return output
    else:
        # ViT-style layer (pre-layernorm); attention weights at attention.attention.
        xp_norm = layer.layernorm_before(xp)
        full_norm = layer.layernorm_before(x)
        sa = layer.attention.attention
        num_heads = sa.num_attention_heads
        WQ, WK, WV = sa.query.weight.T, sa.key.weight.T, sa.value.weight.T
        attn_out = efficient_attention_bias(
            xp_norm, full_norm, WQ, WK, WV,
            sa.query.bias, sa.key.bias, sa.value.bias,
            num_heads
        )
        proj_layer = layer.attention.output  # ViT keeps attention.proj under .output
        proj = proj_layer.dense(attn_out)
        attn_out = proj + xp  # first residual
        res1 = layer.layernorm_after(attn_out)
        inter = layer.intermediate.dense(res1)
        # ViT's FFN activation (GELU) when configured on the layer.
        if hasattr(layer.intermediate, 'intermediate_act_fn'):
            inter = layer.intermediate.intermediate_act_fn(inter)
        ffn = layer.output.dense(inter)

        output = attn_out + ffn  # second residual
        return output