from transformers import BertTokenizer, BertModel
import torch
import faiss
import numpy as np
import pickle

# --- Query & mode configuration -------------------------------------------
# NOTE(review): the original script reassigned `query` four times and
# `tokenizer_path` twice in a row; only the LAST assignment of each took
# effect. The dead alternatives are kept as comments for quick switching.
# query = '什么是geochemistrypi？'
# query = '数据缺失了怎么办？'
# query = '数据缺失'
query = 'missing value'

# 0 -> full documents index, 1 -> small sample index.
mode_group = 0
mode = ['documents', 'sample'][mode_group]

# Local path of the BGE embedding model (the English variant is active).
# tokenizer_path = r'D:\autom\Repositories\models\bge-large-zh'
tokenizer_path = r'D:\autom\Repositories\models\bge-large-en'

# Number of nearest neighbours retrieved per query, chosen per mode.
top_k = [5, 3][mode_group]

# Load the embedding model once at import time (BGE is BERT-based).
tokenizer = BertTokenizer.from_pretrained(tokenizer_path)
model = BertModel.from_pretrained(tokenizer_path)

# File extensions considered when collecting documents.
# ('.docx' appeared twice in the original list; deduplicated here.)
available_exts = ['.md', '.docx', '.doc']

# Pickled corpus location plus FAISS index / vector store paths for the
# current mode (lazily formatted so they track `mode` at call time).
documents_file_path = f'./data/{mode}.pkl'
index_path = lambda: f'/rag_result1/faiss_index_{mode}.pkl'
vector_base_path = lambda: f'/rag_result1/{mode}_vectors.pkl'
# Convert documents to vectors
def vectorize(texts):
    """Embed *texts* with the BGE model and return L2-normalized vectors.

    Parameters
    ----------
    texts : str | list[str]
        Input text(s); tokenized with padding/truncation to 128 tokens.

    Returns
    -------
    numpy.ndarray
        Shape ``(batch, hidden)`` unit-norm [CLS]-token embeddings.
    """
    encoded_input = tokenizer(
        texts, padding=True, truncation=True, return_tensors='pt', max_length=128
    )
    # Inference only: disable autograd bookkeeping.
    with torch.no_grad():
        model_output = model(**encoded_input)
    # Use the [CLS] (first) token's hidden state as the sentence embedding.
    vectors = model_output.last_hidden_state[:, 0, :].numpy()
    # Normalize so inner-product search behaves like cosine similarity.
    # (The original had an unreachable second `return vectors` after this
    # line; it has been removed.)
    return normalize_vectors(vectors)
def normalize_vectors(vectors):
    """Row-wise L2-normalize a 2-D array.

    Parameters
    ----------
    vectors : numpy.ndarray
        Array of shape ``(n, d)``.

    Returns
    -------
    numpy.ndarray
        Same shape; each nonzero row scaled to unit L2 norm. All-zero rows
        are returned unchanged instead of producing NaN/inf (the original
        divided by zero for such rows).
    """
    norms = np.linalg.norm(vectors, axis=1, keepdims=True)
    # Guard against division by zero: a zero row stays a zero row.
    norms[norms == 0] = 1.0
    return vectors / norms
def load_documents(filename=None):
    """Load the pickled document list from disk.

    Parameters
    ----------
    filename : str | None
        Path to the pickle file. Defaults to the module-level
        ``documents_file_path``, resolved at CALL time (the original bound
        it at definition time, freezing the value before it could be used).

    Returns
    -------
    object
        Whatever was pickled — in this script, a list of document strings.

    Notes
    -----
    ``pickle.load`` executes arbitrary code from the file; only load files
    this script itself produced.
    """
    if filename is None:
        filename = documents_file_path
    with open(filename, 'rb') as f:
        return pickle.load(f)

def save_documents(documents, filename=None):
    """Pickle *documents* to disk.

    Parameters
    ----------
    documents : object
        The document collection to persist (a list of strings here).
    filename : str | None
        Destination path. Defaults to the module-level
        ``documents_file_path``, resolved at CALL time (the original bound
        it at definition time, freezing the value before it could be used).
    """
    if filename is None:
        filename = documents_file_path
    with open(filename, 'wb') as f:
        pickle.dump(documents, f)

# Seed the small "sample" corpus on disk, but only when running in
# sample mode (mode_group == 1); in documents mode this is a no-op.
if mode_group == 1:
    save_documents(documents=['missing_value', '数据缺失', 'data-mining'])