import torch

# Demo: token-embedding lookup and learned positional embeddings with
# torch.nn.Embedding.

vocab_size = 6      # number of distinct token IDs the table can hold
embedding_dim = 3   # width of each embedding vector

# A single example sequence of token IDs (batch of 1, length 4).
input_ids = torch.tensor([[2, 3, 5, 1]])

# Seed the RNG so the randomly initialized embedding weights are reproducible.
torch.manual_seed(123)
embedding = torch.nn.Embedding(vocab_size, embedding_dim)
print(f"embedding.weight: {embedding.weight}")
print(f"embedding(input_ids): {embedding(input_ids)}")

sample_id = torch.tensor([3])
# Conceptually, sample_id is turned into a one-hot vector (length vocab_size)
# and multiplied with embedding.weight; the product equals row 3 of
# embedding.weight — i.e. the lookup simply selects that row by index.
print(f"embedding(sample_id): {embedding(sample_id)}")
print(f"embedding.weight[3]: {embedding.weight[3]}")

print(f"embedding(input_ids): {embedding(input_ids)}")

# One learned positional embedding per position in the sequence.
context_length = input_ids.size(1)
pos_embedding_layer = torch.nn.Embedding(context_length, embedding_dim)

pos_embedding = pos_embedding_layer(torch.arange(context_length))
print(f"pos_embedding: {pos_embedding}")

# Final input representation: token embedding plus positional embedding
# (pos_embedding broadcasts over the batch dimension).
input_embed = embedding(input_ids) + pos_embedding
print(f"input_embed: {input_embed}")