import sys
import os
import re

# Build an absolute path to the sibling 'ch02' directory so the
# project-local tokenizer package resolves regardless of the current
# working directory, then make it importable.
current_dir = os.path.dirname(os.path.abspath(__file__))
ch02_path = os.path.join(os.path.dirname(current_dir), 'ch02')
sys.path.append(ch02_path)

import torch
from tokenizer.tokenizer import Tokenizer

# Create a simple vocabulary from a toy sentence.
text = "I say goodbye, you say hello."
# Split on punctuation, "--", or whitespace; the capturing group keeps the
# delimiters as tokens. NOTE(review): this leaves empty strings and single
# spaces in the token list, so they become vocabulary entries too — confirm
# the Tokenizer expects that (reference implementations usually filter them
# out with `item.strip()`).
preprocessed_text = re.split(r'([,.:;?!_()"\']|--|\s)', text)
print(f"preprocessed_text: {preprocessed_text}")
# Map each unique token to an integer id, assigned in sorted order.
vocab = {char: i for i, char in enumerate(sorted(set(preprocessed_text)))}

# Initialize the tokenizer (project-local class; its encode/decode contract
# is not visible from this file).
tokenizer = Tokenizer(vocab)

tokens = tokenizer.encode(text)
print(tokens)

decoded_text = tokenizer.decode(tokens)
# NOTE(review): if decode() returns a string (as is typical), " ".join will
# insert a space between every character — verify decode() returns a list.
print(" ".join(decoded_text))

# Embed each token id into a 3-dimensional vector space.
hidden_dim = 3
# Randomly initialized embedding table, one row per vocabulary entry.
# NOTE(review): no manual seed is set, so the embeddings (and every printed
# value below) differ on each run.
embedding_layer = torch.nn.Embedding(len(vocab), hidden_dim)
embedding_tokens = embedding_layer(torch.tensor(tokens))
print(f"embedding_tokens: {embedding_tokens}")

# calculate the attention score between query and input tokens:
# the second token's embedding serves as the query vector
query = embedding_tokens[1]
print(f"query: {query}")

# Calculate the attention scores between the query and every input token:
# one dot product per token. A single matrix-vector product replaces the
# original element-by-element Python loop and is numerically identical.
attention_scores_2 = embedding_tokens @ query
print(f"attention_scores_2: {attention_scores_2}")

# Calculate the attention weights: softmax normalizes the scores into a
# probability distribution (non-negative, sums to 1).
attention_weights_2 = torch.nn.functional.softmax(attention_scores_2, dim=0)
print(f"attention_weights_2: {attention_weights_2}")
# Tensor .sum() instead of the Python builtin sum(), which iterates the
# tensor element by element; the printed value is the same 0-dim tensor.
print(f"sum(attention_weights_2): {attention_weights_2.sum()}")

# Calculate the context vector: the weighted sum of all token embeddings,
# again as one matvec instead of the original accumulation loop.
context_vector_2 = attention_weights_2 @ embedding_tokens
print(f"context_vector_2: {context_vector_2}")

# calculate the attention scores for all input tokens
attention_scores = torch.zeros(embedding_tokens.shape[0], embedding_tokens.shape[0])
for i, key_i in enumerate(embedding_tokens):
    for j, key_j in enumerate(embedding_tokens):
        attention_scores[i, j] = torch.dot(key_i, key_j)
print(f"attention_scores: {attention_scores}")
# or use matrix multiplication
attention_scores = embedding_tokens @ embedding_tokens.T
print(f"attention_scores using matrix multiplication: {attention_scores}")

# calculate the attention weights using softmax
attention_weights = torch.nn.functional.softmax(attention_scores, dim=-1)
print(f"attention_weights: {attention_weights}")
print(f"sum(attention_weights): {torch.sum(attention_weights, dim=-1)}")

# calculate the context vector
context_vector = attention_weights @ embedding_tokens
print(f"context_vector: {context_vector}")