import numpy as np
from scipy.special import softmax

# Worked example of scaled dot-product attention (Vaswani et al., 2017):
# 3 input token vectors with d_model=4 projected to a head dimension d_k=3.
print('Step1:3 inputs,d_model=4')
x = np.array([
    [1.0, 0.0, 1.0, 0.0],
    [0.0, 2.0, 0.0, 2.0],
    [1.0, 1.0, 1.0, 1.0]
])

# Projection matrices mapping d_model=4 inputs to 3-dimensional Q/K/V vectors.
print('step2: weights 3 dimensions x d_model=4')
print('w_query')
w_query = np.array([
    [1, 0, 1],
    [1, 0, 0],
    [0, 0, 1],
    [0, 1, 1]
])
print('w_key')
w_key = np.array([
    [0, 0, 1],
    [1, 1, 0],
    [0, 1, 0],
    [1, 1, 0]
])
print('w_value')
w_value = np.array([
    [0, 2, 0],
    [0, 3, 0],
    [1, 0, 3],
    [1, 1, 0]
])

print('Step 3:Matrix multiplication to obtain Q,K,V')
print('Query:x * w_query')
Q = x @ w_query
print(Q)
print('Key: x * w_k')
K = x @ w_key
print(K)
print('Value:x * w_value')
V = x @ w_value
print(V)

print('Step4:Scaled Attention Scores')
# The paper scales by sqrt(d_k); here d_k=3 and sqrt(3) ~= 1.73 is rounded
# down to 1 to keep the numbers of this teaching example easy to follow.
k_d = 1
attention_scores = (Q @ K.T) / k_d
print(attention_scores)

print('Step 5:scaled softmax attention_scores for each vector')
# Row-wise softmax in one vectorized call — identical to applying softmax
# to each row individually.
attention_scores = softmax(attention_scores, axis=1)
print(attention_scores[0])
print(attention_scores[1])
print(attention_scores[2])
print('========================')
print('Step 6:attention value obtained score1/d_k * V')
print(V[0])
print(V[1])
print(V[2])
print('Attention1')
# Weight each value vector by input 1's attention weights; the sum below is
# equivalent to attention_scores[0] @ V.
attention1 = attention_scores[0][0] * V[0]
print(attention1)
print('Attention2')
attention2 = attention_scores[0][1] * V[1]
print(attention2)
print('Attention3')
attention3 = attention_scores[0][2] * V[2]
print(attention3)
attention_input1 = attention1 + attention2 + attention3
print(attention_input1)

print('Step 8: Step 1 to step 7 for input 1 to 3')
# Stand-in for the per-head outputs a trained model would produce: one
# uniformly random (3, 64) array, matching the 64-dimensional heads of the
# original Transformer paper (the weights here were never trained).
attention_head1 = np.random.rand(3, 64)
print(attention_head1)
