import sys
import os

# Make the sibling ch02 package importable: this file lives in chNN/, and the
# tokenizer module it needs is in ../ch02. Appending to sys.path avoids having
# to install the project as a package.
current_dir = os.path.dirname(os.path.abspath(__file__))
ch02_path = os.path.join(os.path.dirname(current_dir), 'ch02')
sys.path.append(ch02_path)

import torch
from tokenizer.gpt2 import GPT2Tokenizer  # project-local, resolved via the path hack above
from attention import SelfAttentionV1, SelfAttentionV2, CausalAttention
from attention import MultiHeadAttentionWrapper, MultiHeadAttention

def main():
    """Run the ch03 attention-layer demos on one short tokenized sentence.

    Order matters throughout: every layer construction after
    torch.manual_seed(1234) draws from the global RNG stream, so the
    printed values depend on this exact sequence of statements.
    """
    sentence = "Your journey starts with one step."
    tokenizer = GPT2Tokenizer()
    token_ids = tokenizer.encode(sentence)
    print(f"tokens: {token_ids}")

    # Embed the token ids into a tiny 3-dim space; attention output dim is 2.
    embed_dim = 3
    out_dim = 2
    torch.manual_seed(1234)
    embedding = torch.nn.Embedding(tokenizer.n_vocab(), embed_dim)
    inputs = embedding(torch.tensor(token_ids))
    print(f"x: {inputs}")

    # First self-attention variant.
    sa_v1 = SelfAttentionV1(embed_dim, out_dim)
    print(f"attention(x): {sa_v1(inputs)}")

    # Second variant (presumably nn.Linear-based projections — see attention.py).
    sa_v2 = SelfAttentionV2(embed_dim, out_dim)
    print(f"attention_v2(x): {sa_v2(inputs)}")

    # Copy v2's weights into v1 so both variants produce the same output.
    print(f"attention.W_q.shape: {sa_v1.W_q.shape}")
    first_param = next(sa_v2.parameters())
    print(f"attention_v2.W_q.shape: {first_param.shape}")
    # NOTE(review): this unpack assumes v2 exposes exactly three parameters
    # (i.e. bias-free projections) in W_q, W_k, W_v registration order —
    # confirm against SelfAttentionV2's definition.
    W_q, W_k, W_v = sa_v2.parameters()

    # Assign through .data to bypass autograd bookkeeping; the transpose
    # converts nn.Linear's (out, in) weight layout to v1's (in, out) layout.
    sa_v1.W_q.data = W_q.T.clone()
    sa_v1.W_k.data = W_k.T.clone()
    sa_v1.W_v.data = W_v.T.clone()

    print(f"after assigned parameters, attention(x): {sa_v1(inputs)}")

    # Causal (masked) attention over the full sequence.
    causal = CausalAttention(embed_dim, out_dim, context_length=inputs.shape[0], dropout=0.1)
    print(f"causal_attention(x): {causal(inputs)}")
    print(f"causal_attention.attention_weight: {causal.attention_weight}")

    # Multi-head attention built by stacking independent single heads.
    mha = MultiHeadAttentionWrapper(embed_dim, out_dim, context_length=inputs.shape[0], num_heads=2)
    print(f"multi_head_attention(x): {mha(inputs)}")

    # Fused multi-head attention on a batch of two identical sequences.
    mha = MultiHeadAttention(embed_dim, out_dim, context_length=inputs.shape[0], num_heads=2)
    batch = inputs.unsqueeze(0).repeat(2, 1, 1)
    print(f"batched_x.shape: {batch.shape}")
    print(f"multi_head_attention(batched_x): {mha(batch)}")

# Script entry point: run the demo only when executed directly, not on import.
if __name__ == "__main__":
    main()
