import numpy as np

def softmax(x):
    """Numerically stable softmax over the last axis of *x*.

    Subtracting the per-row maximum before exponentiating avoids
    overflow in ``np.exp`` without changing the result.
    """
    exps = np.exp(x - x.max(axis=-1, keepdims=True))
    return exps / exps.sum(axis=-1, keepdims=True)

def scaled_dot_product_attention(Q, K, V):
    """Compute scaled dot-product attention: softmax(QK^T / sqrt(d)) V.

    Parameters
    ----------
    Q, K, V : np.ndarray
        Arrays whose last two axes are (seq_length, depth). Any leading
        axes (batch, head, ...) are supported and broadcast by matmul;
        the original only handled exactly 3-D input because it used
        ``transpose(0, 2, 1)`` — and its shape comment wrongly claimed a
        4-D ``(batch, head, seq, seq)`` result for that 3-D-only code.

    Returns
    -------
    output : np.ndarray
        Attended values, shape ``(..., seq_length_q, depth_v)``.
    attention_weights : np.ndarray
        Softmax weights, shape ``(..., seq_length_q, seq_length_k)``.
    """
    depth = K.shape[-1]
    # swapaxes(-1, -2) generalizes transpose(0, 2, 1) to any rank >= 2.
    matmul_qk = np.matmul(Q, np.swapaxes(K, -1, -2))  # (..., seq_q, seq_k)
    # Scale by sqrt(depth) to keep logit variance independent of depth.
    logits = matmul_qk / np.sqrt(depth)
    attention_weights = softmax(logits)
    output = np.matmul(attention_weights, V)
    return output, attention_weights

def multi_head_attention(Q, K, V, num_heads):
    """Run attention independently on num_heads slices of the feature dim.

    NOTE(review): there are no learned Q/K/V/output projections here —
    each head simply attends over its own contiguous ``depth``-wide slice
    of the last axis. Confirm that is intentional for this model.

    Parameters
    ----------
    Q, K, V : np.ndarray
        Shape ``(batch, seq_length, model_dim)``.
    num_heads : int
        Number of heads; ``model_dim`` must be divisible by it.

    Returns
    -------
    np.ndarray
        Shape ``(batch, seq_length, model_dim)``.

    Raises
    ------
    ValueError
        If ``model_dim`` is not divisible by ``num_heads``. (The original
        used ``assert``, which is silently stripped under ``python -O``.)
    """
    model_dim = Q.shape[-1]
    if model_dim % num_heads != 0:
        raise ValueError(
            f"model_dim ({model_dim}) must be divisible by num_heads ({num_heads})"
        )
    depth = model_dim // num_heads

    # Per-head attention over contiguous feature slices; keep only the
    # output (index 0), discarding the attention weights.
    heads = [
        scaled_dot_product_attention(
            Q[..., i * depth:(i + 1) * depth],
            K[..., i * depth:(i + 1) * depth],
            V[..., i * depth:(i + 1) * depth],
        )[0]
        for i in range(num_heads)
    ]

    # Concatenating along the feature axis already restores
    # (batch, seq_length, model_dim); the original's trailing reshape
    # was a no-op and has been dropped.
    return np.concatenate(heads, axis=-1)

# Demo: run multi-head attention on random inputs and report the shape.
batch_size = 2
seq_length = 10
model_dim = 512
num_heads = 8

shape = (batch_size, seq_length, model_dim)
Q, K, V = (np.random.rand(*shape) for _ in range(3))

output = multi_head_attention(Q, K, V, num_heads)
print("Attention output shape:", output.shape)