import os
# Use a domestic (China) Hugging Face mirror so model downloads succeed without a proxy
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
from sklearn.feature_extraction.text import TfidfVectorizer
from sentence_transformers import SentenceTransformer
import numpy as np

# Sample pet-themed documents. The original code assigned this list to
# `documents` and then immediately shadowed it with the word list below,
# leaving it as dead data; it is kept under its own name for reference.
pet_documents = [
    "猫喜欢晒太阳",
    "狗是人类最好的朋友",
    "猫和狗是常见的宠物",
    "宠物给人带来快乐",
    "猫很爱晒太阳",
]

# Active dataset: short Chinese words/phrases with clusters of related
# meanings (like/love vs. dislike, plain vs. fresh vs. pure milk) — useful
# for eyeballing whether a similarity measure groups synonyms together.
documents = [
    "喜欢",
    "爱",
    "爱惜",
    "讨厌",
    "牛奶",
    "鲜牛奶",
    "纯牛奶",
]

# Method 1: extract sparse lexical features with TF-IDF.
# BUG FIX: TfidfVectorizer's default token_pattern, r"(?u)\b\w\w+\b",
# only matches tokens of 2+ characters, so a single-character document
# such as "爱" would produce an all-zero vector (and a degenerate row in
# the cosine-similarity matrix). Allow single-character tokens instead.
tfidf_vectorizer = TfidfVectorizer(token_pattern=r"(?u)\b\w+\b")
tfidf_matrix = tfidf_vectorizer.fit_transform(documents)
# Dense view of the TF-IDF document-term matrix (one row per document).
print("TF-IDF向量化结果: ")
print(tfidf_matrix.toarray())
# The vocabulary learned from the corpus, in column order.
print("\nTF-IDF特征词:", tfidf_vectorizer.get_feature_names_out())

# Method 2: dense semantic vectors from a pre-trained Sentence-BERT model.
# NOTE(review): 'all-MiniLM-L6-v2' is trained mainly on English text — its
# quality on Chinese input should be verified; a multilingual checkpoint
# may be more appropriate.
model = SentenceTransformer('all-MiniLM-L6-v2')

# One fixed-length embedding per document.
sentence_embeddings = model.encode(documents)

print("\nSentence-BERT生成的嵌入向量: ")
for idx, vec in enumerate(sentence_embeddings, start=1):
    # Show only the first 5 dimensions to keep the output readable.
    print(f"句子{idx}的向量: {vec[:5]} ...")

# Compare how the two feature-extraction methods judge document similarity.
from sklearn.metrics.pairwise import cosine_similarity

# Pairwise cosine similarity over the TF-IDF rows and the embedding rows.
tfidf_similarity = cosine_similarity(tfidf_matrix)
embedding_similarity = cosine_similarity(sentence_embeddings)

# Print both matrices with matching headers, TF-IDF first.
for label, sim_matrix in (
    ("TF-IDF向量", tfidf_similarity),
    ("嵌入向量", embedding_similarity),
):
    print(f"\n{label}之间的余弦相似度: ")
    print(sim_matrix)