# -*- coding: UTF-8 -*-
'''
@File: 4_bert.py
@IDE: PyCharm
@Author: chaojie
@Date: 2025/11/7
@Introduce:

'''
# NOTE: 所有使用到transformers库的代码均需要科学上网
# 参考:
#     https://huggingface.co/
#     https://huggingface.co/docs
#     https://huggingface.co/docs/transformers/index
#     https://huggingface.co/docs/transformers/v4.41.3/zh/index
#
# 安装: pip install transformers==4.42.0
# NOTE: 需要访问外网(需要下载模型)，可以选择使用：https://honghai.xn--cesw6hd3s99f.com/#/dashboard
# 网站：https://huggingface.co/
#
# NOTE:
#     默认情况下，会下载到当前用户根目录下的.cache/huggingface文件夹中
#     但是可以通过给定环境变量:
#    os.environ['XDG_CACHE_HOME'] = r'D:\cache'
#         XDG_CACHE_HOME=xxx 来指定模型保存文件路径
# """

import os
import torch.nn as nn


# Local filesystem path to a pretrained `bert-base-chinese` checkpoint,
# so the demos below load from disk instead of downloading from the Hub.
model_id =  r'D:\linuxFiles\nlp_demo\weights\bert-base-chinese'

def t1():
    """Tokenize two Chinese sentences and run them through a pretrained BERT encoder.

    Prints the batch encoding produced by the tokenizer, then the shapes of the
    model's last hidden states (one vector per token) and the pooler output
    (the [CLS]-derived sentence vector).
    """
    from transformers.models.bert import BertTokenizer, BertModel

    tokenizer = BertTokenizer.from_pretrained(model_id)

    sentences = ["你好！我是小明！", "小明，你好呀！"]
    # Batch-encode: pad to a common length, truncate overly long inputs,
    # and return ready-to-use PyTorch tensors.
    encoded = tokenizer(
        sentences,
        padding=True,
        truncation=True,
        max_length=128,
        return_tensors='pt',
    )
    print(encoded)

    bert = BertModel.from_pretrained(model_id)
    outputs = bert(**encoded)
    # last_hidden_state: per-token feature vectors; pooler_output: [CLS] summary.
    print("最后一层的特征向量为: ", outputs.last_hidden_state.shape)
    print(outputs.pooler_output.shape)

def t2():
    """Build a randomly-initialized miniature BERT from an explicit config and print it."""
    from transformers.models.bert import BertTokenizer, BertModel, BertConfig

    # Tiny configuration: small vocab, narrow hidden size, 2 layers / 2 heads.
    tiny_cfg = BertConfig(
        vocab_size=1000,
        hidden_size=128,
        num_hidden_layers=2,
        num_attention_heads=2,
    )
    print(BertModel(tiny_cfg))

def t3():
    """Load model and tokenizer via the Auto* factory classes and report their concrete types."""
    from transformers import AutoModel, AutoTokenizer

    # AutoModel/AutoTokenizer resolve the concrete classes from the checkpoint's config.
    auto_model = AutoModel.from_pretrained(model_id)
    auto_tokenizer = AutoTokenizer.from_pretrained(model_id)
    print(type(auto_model))
    print(type(auto_tokenizer))


if __name__ == '__main__':
    # Run the basic tokenize-and-encode demo; swap in t2()/t3() to try the others.
    t1()
