import torch

def flash_attention_v1(Q, K, V):
    """
    FlashAttention V1 implementation with single-pass computation.
    Fuses Softmax(QK^T)V into one loop without materializing the full
    score matrix S or the softmax matrix P.

    Args:
        Q, K, V: Query, Key, Value matrices of shape (seq_len, head_dim)
    Returns:
        Output of attention: (seq_len, head_dim), equal to
        softmax(Q @ K^T / sqrt(head_dim)) @ V
    """
    seq_len, head_dim = Q.shape
    scale = 1.0 / (head_dim ** 0.5)  # Softmax temperature 1/sqrt(d)

    # Unnormalized output accumulator and per-row online-softmax statistics.
    O = torch.zeros_like(Q)  # Accumulated exp(S) @ V (normalized at the end)
    l = torch.zeros(seq_len, dtype=torch.float32, device=Q.device)  # Running sum of exponentials (d_i')
    m = torch.full((seq_len,), float('-inf'), device=Q.device)  # Running row max (m_i)

    # Single pass over sequence length
    for i in range(seq_len):
        # Scaled scores of query row i against all keys; shape: (1, seq_len)
        S_i = torch.matmul(Q[i:i+1], K.transpose(-1, -2)) * scale

        # Online softmax update for row i
        m_old = m[i]  # Previous max (m_{i-1}); -inf on first visit
        m_new = torch.maximum(m_old, torch.max(S_i))  # Updated max
        l_old = l[i]  # Previous sum of exponentials (d_{i-1}')

        # Rescale stale statistics to the new max. On the first visit
        # exp(-inf - m_new) == 0, so the old state contributes nothing.
        exp_diff = torch.exp(m_old - m_new)
        exp_S = torch.exp(S_i - m_new)
        l_new = l_old * exp_diff + torch.sum(exp_S)

        # BUG FIX: accumulate the UNNORMALIZED weighted values. The original
        # divided by l_new here AND divided by l again after the loop, which
        # shrank every output row by its softmax denominator.
        O[i] = O[i] * exp_diff + torch.matmul(exp_S, V)

        # Persist updated statistics
        m[i] = m_new
        l[i] = l_new

    # Single final normalization by each row's softmax denominator
    O = O / l.unsqueeze(-1)
    return O

# Example usage
n = 4
d = 2
# All three matrices share the same 4x2 values 1..8.
_example = torch.arange(1.0, n * d + 1).reshape(n, d)
Q = _example.clone()
K = _example.clone()
V = _example.clone()

output = flash_attention_v1(Q, K, V)
print("Output of Flash Attention V1:")
print(output)
