'''
2;[[[1.92,1.48],[0.67,-1,23],[0.35,-0.68]],[[-1.11,0.09],[-0.3,-0.39],[-0.59,-0.06]]];[[1.0,2.0],[2.0,2.0]];[[1.0,1.0],[2.0,2.0]];[[1.0,1.0],[2.0,2.0]];[[1.0,1.0],[2.0,2.0]]
Expected output
[[[14.64,14.64],[-5.36,-5.36],[-4.44,-4.44]],[[-2.79,-2.79],[-3.04,-3.04],[-2.79,-2.79]]]

'''

import json
import numpy as np

def multi_head_self_attention_from_scratch(input_string: str) -> list:
    """Run causal multi-head self-attention over a batched input.

    The input string has six ';'-separated fields:
    ``num_heads; X; WQ; WK; WV; WO`` where every field after the first is a
    JSON array (X is batch x seq x d_model; the weights are d_model x d_model).

    Returns the projected attention output as nested Python lists, with each
    value rounded to 2 decimal places.
    """
    # Patch a known typo in the sample input ("-1,23" should be "-1.23")
    # before JSON parsing.
    cleaned = input_string.replace(",-1,23", ",-1.23")

    fields = cleaned.split(';')
    n_heads = int(fields[0])
    X, w_q, w_k, w_v, w_o = (
        np.array(json.loads(field), dtype=float) for field in fields[1:6]
    )

    n_batch, n_tokens, d_model = X.shape
    head_dim = d_model // n_heads

    def split_heads(mat):
        # (B, T, D) -> (B, H, T, head_dim)
        return mat.reshape(n_batch, n_tokens, n_heads, head_dim).transpose(0, 2, 1, 3)

    q = split_heads(X @ w_q)
    k = split_heads(X @ w_k)
    v = split_heads(X @ w_v)

    # Scaled dot-product scores with a lower-triangular (causal) mask:
    # positions may only attend to themselves and earlier tokens.
    scores = (q @ k.transpose(0, 1, 3, 2)) / np.sqrt(head_dim)
    keep = np.tril(np.ones((n_tokens, n_tokens))) != 0
    scores = np.where(keep, scores, -np.inf)

    # Numerically-stable softmax along the key axis; a row that is entirely
    # -inf yields NaNs, which nan_to_num zeroes out.
    shifted = np.exp(scores - scores.max(axis=-1, keepdims=True))
    weights = np.nan_to_num(shifted / shifted.sum(axis=-1, keepdims=True))

    # Merge the heads back to (B, T, D) and apply the output projection.
    context = (weights @ v).transpose(0, 2, 1, 3).reshape(n_batch, n_tokens, d_model)
    return np.around(context @ w_o, 2).tolist()

def _main() -> None:
    """Read one attention problem from stdin and print the result list."""
    user_input_string = input()
    output = multi_head_self_attention_from_scratch(user_input_string)
    print(output)


# Guard the script entry point so importing this module has no side effects
# (the original ran input()/print() unconditionally at import time).
if __name__ == "__main__":
    _main()