import os
# os.path.join(os.path.dirname(os.path.abspath(__file__)))
from chromadb import EmbeddingFunction
from typing import List
import numpy as np

from langchain_community.embeddings import HuggingFaceEmbeddings
from transformers import AutoModel, AutoTokenizer
from typing import List
from langchain_customized_huggingface import HuggingFaceBgeEmbeddings
#
# class HuggingFaceBgeEmbeddings(HuggingFaceEmbeddings):
#     def __init__(self, model_path: str, **kwargs):
#         # 直接使用本地模型路径
#         self.model_path = model_path
#         self.tokenizer = AutoTokenizer.from_pretrained(model_path)
#         self.model = AutoModel.from_pretrained(model_path)
#         super().__init__(**kwargs)
#
#     def embed_documents(self, texts: List[str]) -> List[List[float]]:
#         # 实现文档嵌入逻辑
#         inputs = self.tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
#         outputs = self.model(**inputs)
#         return outputs.last_hidden_state[:, 0].detach().numpy().tolist()
#
#
# class BgeChromaAdapter(EmbeddingFunction):
#     def __init__(self, model_path: str, device: str = 'cuda', normalize_embeddings: bool = True):
#         # 直接初始化嵌入模型
#         from transformers import AutoModel, AutoTokenizer
#         self.tokenizer = AutoTokenizer.from_pretrained(model_path)
#         self.model = AutoModel.from_pretrained(model_path).to(device)
#         self.normalize = normalize_embeddings
#
#     def __call__(self, input: List[str]) -> List[List[float]]:
#         # 生成嵌入向量
#         inputs = self.tokenizer(input, padding=True, truncation=True, return_tensors="pt").to(self.model.device)
#         outputs = self.model(**inputs)
#         embeddings = outputs.last_hidden_state[:, 0].cpu().detach().numpy()
#
#         # 应用归一化
#         if self.normalize:
#             import numpy as np
#             embeddings = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
#
#         return embeddings.tolist()
#
#
# def get_embedding(model_path, **kwargs):
#     # 返回适配Chroma接口的包装器
#     return BgeChromaAdapter(model_path, **kwargs)




def get_embedding(model_path, lang="zh", device='cuda', normalize_embeddings=True):
    """Build a HuggingFaceBgeEmbeddings instance from a local model directory.

    Args:
        model_path: Filesystem path to the local model directory
            (e.g. ``F:/models/BAAI/bge-large-en-v1.5``).
        lang: Unused; kept for backward compatibility with existing callers.
        device: Device passed to the model loader (e.g. ``'cuda'`` or ``'cpu'``).
        normalize_embeddings: Whether the encoder should L2-normalize outputs.

    Returns:
        A configured ``HuggingFaceBgeEmbeddings`` object.
    """
    # Normalize first so a trailing path separator does not shift which
    # component basename(dirname(...)) picks out. For a path like
    # "F:/models/BAAI/bge-large-en-v1.5" this yields "BAAI" — presumably the
    # org/vendor name expected by HuggingFaceBgeEmbeddings; verify against
    # langchain_customized_huggingface if the model name looks wrong.
    normalized_path = os.path.normpath(model_path)
    model_name = os.path.basename(os.path.dirname(normalized_path))
    model_kwargs = {'device': device}
    encode_kwargs = {'normalize_embeddings': normalize_embeddings}
    return HuggingFaceBgeEmbeddings(
        model_name=model_name,
        model_path=model_path,
        model_kwargs=model_kwargs,
        encode_kwargs=encode_kwargs,
    )

if __name__ == '__main__':
    # Smoke test: load a local BGE model and embed one document.
    emb = get_embedding("F:/models/BAAI/bge-large-en-v1.5")
    # embed_documents expects List[str] (see the reference implementation's
    # `texts: List[str]` signature); passing a bare str risks it being
    # treated as an iterable of single characters.
    print(emb.embed_documents(["你好，中国！"]))