
import torch
import faiss
from transformers import AutoModelForCausalLM, AutoTokenizer,AutoModel
import numpy as np

# Local checkpoint directory of the embedding model (example: a distilled 7B model).
# NOTE(review): AutoModel loads the bare encoder/backbone; the
# AutoModelForCausalLM import above is currently unused — confirm which is intended.
model_name = "./deepseek/"  # example: load the distilled 7B model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)

# Parallel lists: output_sentence[i] is the canned reply for input_sentence[i].
# The retrieval below relies on this index alignment.
input_sentence = ["你好啊", "你多大了", "我想去吃午饭",'你给我讲个故事']
output_sentence = ["我很好", "我今年18岁了", "正好，我们一起去",'从前有座山，山上有座庙，庙里有两个和尚']

def text_to_embedding(texts):
    """Embed each text and stack the vectors into one (N, dim) float array.

    Each text is tokenized independently (truncated to 512 tokens) and run
    through the model; the hidden state of the first token is taken as the
    sentence embedding.  NOTE(review): first-token ("CLS") pooling assumes a
    BERT-style encoder — confirm this suits the loaded checkpoint.

    Args:
        texts: iterable of strings to embed.

    Returns:
        np.ndarray of shape (len(texts), hidden_dim).
    """
    embeddings = []
    # Inference only: disable autograd so no computation graph is built and
    # retained per call (the original tracked gradients needlessly).
    with torch.no_grad():
        for text in texts:
            inputs = tokenizer(text, return_tensors="pt", padding=True,
                               truncation=True, max_length=512)
            outputs = model(**inputs)
            # First-token hidden state as the sentence vector; no .detach()
            # needed under no_grad.
            embedding = outputs.last_hidden_state[:, 0, :].numpy()
            embeddings.append(embedding)
    return np.vstack(embeddings)

# Convert the canned input texts into embedding vectors.
input_embeddings = text_to_embedding(input_sentence)

# Build the FAISS index (exact, brute-force search over L2 distance).
dimension = input_embeddings.shape[1]  # dimensionality of the embedding vectors
index = faiss.IndexFlatL2(dimension)   # use L2 distance
index.add(input_embeddings)            # add the embedding vectors to the index

# Persist the FAISS index to disk.
faiss.write_index(index, "vector_index.faiss")

# Reload the index from disk (demonstrates the round trip; queries below use this copy).
loaded_index = faiss.read_index("vector_index.faiss")

# 查询函数
def query_sentence(sentence):
    """Return the canned reply whose input sentence is most similar to *sentence*.

    Embeds the query, searches the loaded FAISS index for the single nearest
    input embedding (L2 distance), and returns the reply at the matching
    position of ``output_sentence``.

    Args:
        sentence: query string.

    Returns:
        The canned reply string paired with the nearest stored input.

    Raises:
        LookupError: if the search yields no result.  FAISS signals this with
            index -1, which Python's negative indexing would otherwise turn
            into silently returning output_sentence[-1].
    """
    query_embedding = text_to_embedding([sentence])
    D, I = loaded_index.search(query_embedding, k=1)  # nearest 1 neighbour
    most_similar_index = int(I[0][0])
    if most_similar_index < 0:
        raise LookupError("FAISS returned no match for the query")
    return output_sentence[most_similar_index]

# Smoke test: query with one of the known inputs and print the retrieved reply.
query_text = "你给我讲个故事"
result = query_sentence(query_text)
print(f"查询文本: {query_text}")
print(f"最相似的输出: {result}")