# import ollama
# from langchain_community.embeddings import OllamaEmbeddings
#
#
# def generate_embeddings(texts: list[str], model_name: str = 'bge-m3:latest'):
#     """使用 Ollama 生成 BGE-M3 嵌入"""
#     embeddings = []
#     emb = OllamaEmbeddings(
#         base_url="http://192.168.77.3:11434",
#         model="bge-m3"
#     )
#     for text in texts:
#         response =emb.embed_documents(texts=[text])
#
#         embeddings.append(response[0])
#     return embeddings
#
# # 示例文本
# documents = [
#     "深度学习是机器学习的一个分支",
#     "Python是一种流行的编程语言",
#     "Ollama 支持本地运行大语言模型"
# ]
#
# # 生成密集向量
# dense_embeddings = generate_embeddings(documents)
# print(f"生成的嵌入维度: {len(dense_embeddings[0])}")  # BGE-M3 输出1024维
from FlagEmbedding import BGEM3FlagModel
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# Path to the locally downloaded BAAI/bge-m3 model weights.
MODEL_PATH = r'I:\models\BAAI\bge-m3'

# Load BGE-M3 via FlagEmbedding. use_fp16=True halves memory use and speeds
# up inference at a small (usually negligible) accuracy cost.
model = BGEM3FlagModel(MODEL_PATH, use_fp16=True)

# Example queries and candidate passages for a retrieval demo.
sentences_1 = ["What is BGE M3?", "Definition of BM25"]  # fixed typo: "Defination"
sentences_2 = ["BGE M3 is an embedding model supporting dense retrieval, lexical matching and multi-vector interaction.",
               "BM25 is a bag-of-words retrieval function that ranks a set of documents based on the query terms appearing in each document"]

# BGE-M3 can emit three representations at once:
#   dense_vecs      - single 1024-dim dense embedding per sentence
#   lexical_weights - sparse token weights (BM25-like lexical matching)
#   colbert_vecs    - per-token multi-vectors for late interaction scoring
output_1 = model.encode(sentences_1, return_dense=True, return_sparse=True, return_colbert_vecs=True)
output_2 = model.encode(sentences_2, return_dense=True, return_sparse=True, return_colbert_vecs=True)

# Show the dense embedding of the first query.
print(output_1['dense_vecs'][0])
# ColBERT-style late-interaction similarity between query/passage token vectors:
# print(model.colbert_score(output_1['colbert_vecs'][0], output_2['colbert_vecs'][0]))
# print(model.colbert_score(output_1['colbert_vecs'][0], output_2['colbert_vecs'][1]))


# Same model loaded through llama_index's HuggingFace wrapper, to compare
# its output against the direct FlagEmbedding path above.
embed = HuggingFaceEmbedding(model_name=r'I:\models\BAAI\bge-m3')

# NOTE(review): this reaches into the private `_model` attribute of the
# wrapper and forwards BGE-M3-specific kwargs to its encode() — presumably
# the underlying object accepts them, but this bypasses the public embedding
# API and may break across llama_index versions; verify before relying on it.
o2 = embed._model.encode(
    sentences_1,
    return_dense=True,
    return_sparse=True,
    return_colbert_vecs=True,
)
print(o2)