from transformers import GPT2LMHeadModel, GPT2Config, GPT2Tokenizer

# 1. Build a GPT-2 config from scratch (customize as needed; no pretrained
#    weights are involved).
#    FIX: vocab_size must match the tokenizer used below. The GPT-2 tokenizer
#    has 50257 tokens; the previous value (30522 — BERT's vocab size) would
#    make the embedding table too small, so any token id >= 30522 produced by
#    the tokenizer would raise an IndexError at lookup time.
config = GPT2Config(
    vocab_size=50257,        # must equal len(tokenizer) for the GPT-2 tokenizer
    n_positions=512,         # maximum sequence length
    n_ctx=512,               # legacy alias of n_positions (kept for older transformers versions)
    n_embd=768,              # embedding dimension
    n_layer=12,              # number of Transformer layers
    n_head=12,               # number of attention heads
)

# 2. Instantiate the model from the config alone — weights are randomly
#    initialized; no pretrained parameters are loaded.
model = GPT2LMHeadModel(config)
print(model)
# Printed result:
# GPT2LMHeadModel(
#   (transformer): GPT2Model(
#     (wte): Embedding(30522, 768)
#     (wpe): Embedding(512, 768)
#     (drop): Dropout(p=0.1, inplace=False)
#     (h): ModuleList(
#       (0-11): 12 x GPT2Block(
#         (ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
#         (attn): GPT2Attention(
#           (c_attn): Conv1D(nf=2304, nx=768)
#           (c_proj): Conv1D(nf=768, nx=768)
#           (attn_dropout): Dropout(p=0.1, inplace=False)
#           (resid_dropout): Dropout(p=0.1, inplace=False)
#         )
#         (ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
#         (mlp): GPT2MLP(
#           (c_fc): Conv1D(nf=3072, nx=768)
#           (c_proj): Conv1D(nf=768, nx=3072)
#           (act): NewGELUActivation()
#           (dropout): Dropout(p=0.1, inplace=False)
#         )
#       )
#     )
#     (ln_f): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
#   )
#   (lm_head): Linear(in_features=768, out_features=30522, bias=False)
# )




# 3. Use the stock GPT-2 tokenizer (downloaded from the Hugging Face hub).
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
print(len(tokenizer.get_vocab())) 
# Printed result: 50257
# NOTE(review): the model config's vocab_size must match this tokenizer's
# 50257 entries — otherwise token ids beyond the embedding table would raise
# an IndexError during training/inference.

print(tokenizer)
# Printed result:
# GPT2Tokenizer(name_or_path='gpt2', vocab_size=50257, model_max_length=1024, is_fast=False, padding_side='right', truncation_side='right', special_tokens={'bos_token': '<|endoftext|>', 'eos_token': '<|endoftext|>', 'unk_token': '<|endoftext|>'}, clean_up_tokenization_spaces=False, added_tokens_decoder={
#         50256: AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),
# }
# The GPT-2 vocabulary does NOT define:
# "cls_token": "[CLS]"
# "sep_token": "[SEP]"
# "pad_token": "[PAD]" 


# Show that GPT-2 reuses one special token, <|endoftext|>, for bos/eos/unk.

# Print the special token strings
for attr in ("bos_token", "eos_token", "unk_token"):
    print(f"{attr}:", getattr(tokenizer, attr))

# Print the corresponding token ids
for attr in ("bos_token_id", "eos_token_id", "unk_token_id"):
    print(f"{attr}:", getattr(tokenizer, attr))

# Verify all three special tokens are literally the same string
distinct_tokens = {tokenizer.bos_token, tokenizer.eos_token, tokenizer.unk_token}
print("Are all special tokens the same?", len(distinct_tokens) == 1)


# Printed result:
# bos_token: <|endoftext|>
# eos_token: <|endoftext|>
# unk_token: <|endoftext|>
# bos_token_id: 50256
# eos_token_id: 50256
# unk_token_id: 50256
# Are all special tokens the same? True