import os

# Point Hugging Face downloads at a domestic (China) mirror.
#
# FIX: HUGGINGFACE_HUB_URL / HF_DATASETS_URL / HF_METRICS_URL are not
# environment variables recognized by huggingface_hub or datasets, so on
# their own they have no effect. The supported switch is HF_ENDPOINT,
# which huggingface_hub uses as the hub base URL for all downloads.
# NOTE(review): verify this mirror URL actually serves the hub API at this
# path — some mirrors (e.g. hf-mirror.com) expect the endpoint at the root.
os.environ["HF_ENDPOINT"] = "https://mirrors.tuna.tsinghua.edu.cn/hugging-face-hub"

# Kept for backward compatibility in case external tooling reads them,
# even though the HF libraries themselves ignore these three.
os.environ["HUGGINGFACE_HUB_URL"] = "https://mirrors.tuna.tsinghua.edu.cn/hugging-face-hub"
os.environ["HF_DATASETS_URL"] = "https://mirrors.tuna.tsinghua.edu.cn/hugging-face-datasets"
os.environ["HF_METRICS_URL"] = "https://mirrors.tuna.tsinghua.edu.cn/hugging-face-metrics"

# Local cache locations (Windows drive E:). HF_HOME is the root for all
# Hugging Face caches; the per-library variables below override it.
os.environ["HF_HOME"] = "e:/hf/hf_home_directory"
# TRANSFORMERS_CACHE is deprecated (transformers v5 uses HF_HOME/HF_HUB_CACHE)
# but is still honored by transformers v4, so it is kept here.
os.environ["TRANSFORMERS_CACHE"] = "e:/hf/transformers_cache_directory"
os.environ["HF_DATASETS_CACHE"] = "e:/hf/datasets_cache_directory"

from transformers import BertModel, BertTokenizer


from datasets import load_dataset
import torch

# --- Demo: run a Chinese BERT forward pass and fetch the GLUE/MRPC data ---

# Pre-trained WordPiece tokenizer matching the "bert-base-chinese" checkpoint.
tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")

# Pre-trained BERT encoder (base model, no task head).
model = BertModel.from_pretrained("bert-base-chinese")

# Download/load the MRPC task from the GLUE benchmark.
# NOTE(review): `dataset` is loaded but not used further in this chunk —
# presumably for later experimentation.
dataset = load_dataset("glue", "mrpc")

# Encode one sample sentence as PyTorch tensors.
inputs = tokenizer("你好，Hugging Face！", return_tensors="pt")

# Inference only: skip gradient tracking to save memory and time.
with torch.no_grad():
    outputs = model(**inputs)

print(outputs)

# Final-layer hidden states, one vector per input token.
last_hidden_state = outputs.last_hidden_state

