import os

# Configure the Hugging Face mirror and pin the visible GPU BEFORE any
# transformers/torch import reads these environment variables.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
os.environ["CUDA_VISIBLE_DEVICES"] = "2"

# Show the default Hugging Face cache path for reference.
cache_path = os.path.join(os.path.expanduser("~"), ".cache", "huggingface", "transformers")
print(f"Default cache path: {cache_path}")


import numpy as np
import torch
from transformers import AutoTokenizer, AutoModel
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection on old matplotlib

# Load the pretrained BERT model and tokenizer.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased")
model.eval()  # disable dropout for deterministic inference

# Input text.
text = "Python is a powerful language."

# Encode the text into token ids, shape (1, seq_len).
input_ids = tokenizer.encode(text, return_tensors='pt')

# Run BERT to get the last hidden states. No gradients are needed for
# inference (the original `model.encoder.disabled_parallelism()` is not a
# real transformers API and raised AttributeError).
with torch.no_grad():
    last_hidden_states = model(input_ids)[0]  # (1, seq_len, 768)

# Per-token embeddings as a NumPy array, shape (seq_len, 768).
token_embs = last_hidden_states[0].numpy()

# Project the 768-dim token embeddings down to 3 dimensions via PCA
# (SVD of the mean-centered matrix), so each token becomes one 3D point.
# The original code averaged all tokens into a single 1-D vector and then
# indexed it as 2-D (`text_emb[:, 0]`), which raised IndexError.
centered = token_embs - token_embs.mean(axis=0, keepdims=True)
_, _, vt = np.linalg.svd(centered, full_matrices=False)
coords = centered @ vt[:3].T  # (seq_len, 3): top-3 principal components

# Map the tokens into 3D space.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

# 3D scatter plot: one point per token.
ax.scatter(coords[:, 0], coords[:, 1], coords[:, 2], c='b')

# Labels and title.
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title('3D Representation of Text')

plt.show()