import torch


text_embedding = torch.tensor(
    [
        [0.43, 0.15, 0.89],  # Your (x^1)
        [0.55, 0.87, 0.66],  # journey (x^2)
        [0.57, 0.85, 0.64],  # starts (x^3)
        [0.22, 0.58, 0.33],  # with (x^4)
        [0.77, 0.25, 0.10],  # one (x^5)
        [0.05, 0.80, 0.55]  # step (x^6)
    ]
)


def step_1_compute_attention_score(inputs):
    """
    Self-attention first step: compute pairwise attention scores.

    The score between query token q and key token k is the dot product
    of their embedding rows.

    Args:
        inputs: 2-D tensor of shape (num_tokens, embed_dim).

    Returns:
        Tensor of shape (num_tokens, num_tokens) where res[q, k] equals
        dot(inputs[q], inputs[k]).
    """
    # Bug fix: the loop bound was hard-coded to 6, which crashed (or silently
    # read garbage from torch.empty) for inputs with a different row count.
    num_tokens = inputs.shape[0]
    res = torch.empty(num_tokens, num_tokens)

    for q_idx in range(num_tokens):
        query = inputs[q_idx]
        for k_idx, row in enumerate(inputs):
            res[q_idx, k_idx] = torch.dot(row, query)

    return res


def step_2_compute_attention_weights(attention_scores):
    """
    Self-attention second step: normalize each score row into weights.

    Each row is divided by its sum so the weights for every query sum
    to 1 (naive normalization; the v2 path uses softmax instead).

    Args:
        attention_scores: 2-D tensor of shape (num_tokens, num_tokens).

    Returns:
        A new tensor of the same shape whose rows sum to 1.

    Note:
        Unlike the previous version, the input tensor is no longer
        mutated in place; callers receive a fresh tensor.
    """
    # keepdim keeps the row sums shaped (n, 1) so broadcasting divides row-wise.
    return attention_scores / attention_scores.sum(dim=-1, keepdim=True)


def step_3_compute_context_vector(attention_weights, inputs):
    """
    Self-attention third step: combine inputs into context vectors.

    Each output row is the attention-weighted sum of all input rows.

    Args:
        attention_weights: (num_tokens, num_tokens) row-normalized weights.
        inputs: (num_tokens, embed_dim) token embeddings.

    Returns:
        (num_tokens, embed_dim) tensor of context vectors.
    """
    return torch.matmul(attention_weights, inputs)


# Version 1: explicit loop-based implementation, mainly to understand the internals.
def simplified_self_attention_v1(inputs_embedding):
    """
    Simplified self-attention (no trainable weights), step-by-step version.

    Pipeline: pairwise dot-product scores -> row-normalized weights ->
    context vectors as weighted sums of the input embeddings.
    """
    scores = step_1_compute_attention_score(inputs_embedding)
    weights = step_2_compute_attention_weights(scores)
    return step_3_compute_context_vector(weights, inputs_embedding)


# Version 2: the same computation expressed with matrix operations.
def simplified_self_attention_v2(inputs_embedding):
    """
    Simplified self-attention (no trainable weights), vectorized version.

    Args:
        inputs_embedding: 2-D tensor of shape (num_tokens, embed_dim).

    Returns:
        (num_tokens, embed_dim) tensor of context vectors.
    """
    # Bug fix: '*' is elementwise multiplication and cannot even broadcast
    # (n, d) against (d, n); the score matrix needs '@' (matrix multiply).
    attention_scores = inputs_embedding @ inputs_embedding.T

    # Softmax turns each score row into weights that sum to 1.
    attention_weights = torch.softmax(attention_scores, dim=1)

    # Weighted sum of the inputs gives one context vector per token.
    context_vector = attention_weights @ inputs_embedding
    return context_vector


if __name__ == '__main__':
    # Demo: run the loop-based version on the toy sentence embeddings.
    context = simplified_self_attention_v1(text_embedding)
    print(context)
