import sys
import os

# Make the sibling ch02 directory importable (it provides tokenizer.gpt2).
_this_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(os.path.dirname(_this_dir), 'ch02'))

import torch
from tokenizer.gpt2 import GPT2Tokenizer

# Tokenize a sample sentence and map each token id to a small dense vector.
tokenizer = GPT2Tokenizer()

tokens = tokenizer.encode("Your journey starts with one step.")
print(f"tokens: {tokens}")

# Toy-sized embedding: 3 dimensions per token, one row per vocabulary entry.
hidden_dim = 3
embedding_layer = torch.nn.Embedding(tokenizer.n_vocab(), hidden_dim)
embedding_tokens = embedding_layer(torch.tensor(tokens))
print(f"embedding_tokens: {embedding_tokens}")

# Focus on the second token's embedding; it serves as the input that is
# projected into query, key, and value spaces below.  (The original code
# bound the same vector to two names, `query` and `key`, and then derived
# the value projection from the one named `key` — numerically identical,
# but misleading.)
x_2 = embedding_tokens[1]

# prepare the trainable parameters
d_out = 2
torch.manual_seed(123)  # fixed seed so the printed numbers are reproducible
W_q = torch.nn.Parameter(torch.randn(hidden_dim, d_out))
W_k = torch.nn.Parameter(torch.randn(hidden_dim, d_out))
W_v = torch.nn.Parameter(torch.randn(hidden_dim, d_out))

# project the token embedding into query/key/value spaces
query_2 = x_2 @ W_q
key_2 = x_2 @ W_k
value_2 = x_2 @ W_v

# attention score of this query against its own key: a scalar dot product.
# NOTE: `.T` on a 1-D tensor is a no-op and emits a UserWarning in recent
# PyTorch, so the transpose is dropped — `dot` takes two 1-D tensors as-is.
attention_scores = query_2.dot(key_2)
print(f"attention_scores: {attention_scores}")

# calculate simutenisly
queries = embedding_tokens @ W_q
keys = embedding_tokens @ W_k
values = embedding_tokens @ W_v

attention_scores = queries @ keys.T
print(f"attention_scores: {attention_scores}")

d_k = keys.shape[1]
attention_weights = torch.nn.functional.softmax(attention_scores / torch.sqrt(torch.tensor(d_k)), dim=-1)
print(f"attention_weights: {attention_weights}")
print(f"sum(attention_weights): {torch.sum(attention_weights, dim=-1)}")

context_vector = attention_weights @ values
print(f"context_vector: {context_vector}")
