import os
from zhipuai import ZhipuAI
from typing import List
import numpy as np
# import torch

# Embedding classes
from langchain_community.embeddings import HuggingFaceEmbeddings
class HFembedding:
    """Embedding wrapper around LangChain's HuggingFaceEmbeddings.

    Loads a local HuggingFace embedding model and exposes helpers for
    embedding text and computing cosine similarity.
    """

    def __init__(self, model_path: str = "") -> None:
        """Load a HuggingFace embedding model.

        Args:
            model_path: Local path or hub name of the model. When empty,
                the ``M3E_BASE_MODEL_PATH`` environment variable is used.
        """
        # Bug fix: torch was referenced but its import is commented out at
        # module level; import it locally just for CUDA detection and fall
        # back to CPU when torch is unavailable.
        try:
            import torch
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        except ImportError:
            device = 'cpu'
        # Collapse the two identical branches: only model_name differed.
        self.embedding_model = HuggingFaceEmbeddings(
            model_name=model_path or os.getenv("M3E_BASE_MODEL_PATH"),
            model_kwargs={'device': device},
            encode_kwargs={'normalize_embeddings': True},
        )

    def get_embedding(self, content: str = "", model_name: str = ""):
        """Return the embedding vector for *content*.

        ``model_name`` is accepted for interface compatibility but unused.
        """
        # Bug fix: the attribute is embedding_model, not embedding.
        return self.embedding_model.embed_query(content)

    def compare(self, text1: str, text2: str) -> float:
        """Return the cosine similarity between the embeddings of two texts."""
        # Bug fix: was self.embedding (nonexistent attribute); also reuse
        # compare_v so zero vectors yield 0 instead of dividing by zero.
        embed1 = self.embedding_model.embed_query(text1)
        embed2 = self.embedding_model.embed_query(text2)
        return self.compare_v(embed1, embed2)

    def compare_v(self, vector1: List[float], vector2: List[float]) -> float:
        """Return the cosine similarity of two vectors; 0 if either is zero."""
        dot_product = np.dot(vector1, vector2)
        magnitude = np.linalg.norm(vector1) * np.linalg.norm(vector2)
        if not magnitude:
            return 0
        return dot_product / magnitude


class ZhipuEmbedding(object):
    """Embedding wrapper around the ZhipuAI remote embedding API."""

    def __init__(self, api_key: str = "") -> None:
        """Create a ZhipuAI client.

        Args:
            api_key: Explicit API key. When empty, falls back to the
                ``ZHIPUAI_API_KEY`` environment variable.
        """
        # Simplified: the original branch assigned api_key to itself.
        self.embedding_model = ZhipuAI(api_key=api_key or os.getenv("ZHIPUAI_API_KEY"))

    def get_embedding(self, content: str = "", model_name: str = "embedding-2"):
        """Return the embedding vector for *content* via the remote API.

        Args:
            content: Non-empty text to embed.
            model_name: Name of the remote embedding model to call.

        Raises:
            ValueError: If *content* is empty.
        """
        if not content:
            # ValueError is a subclass of the previously raised Exception,
            # so existing callers catching Exception still work.
            raise ValueError("content must not be empty")
        response = self.embedding_model.embeddings.create(
            model=model_name,  # bug fix: honor model_name instead of hardcoding
            input=content,
        )
        return response.data[0].embedding

    def compare_v(self, vector1: List[float], vector2: List[float]) -> float:
        """Return the cosine similarity of two vectors; 0 if either is zero."""
        dot_product = np.dot(vector1, vector2)
        magnitude = np.linalg.norm(vector1) * np.linalg.norm(vector2)
        if not magnitude:
            return 0
        return dot_product / magnitude

    def compare(self, text1: str, text2: str) -> float:
        """Return the cosine similarity between the embeddings of two texts."""
        # Reuse get_embedding (same default model) instead of duplicating
        # the API call, and compare_v for the zero-vector guard.
        embed1 = self.get_embedding(text1)
        embed2 = self.get_embedding(text2)
        return self.compare_v(embed1, embed2)
    
from sentence_transformers import SentenceTransformer
# from langchain.embeddings.base import Embeddings
class BGEembedding(object):
    """
    BGEembedding
    """
    def __init__(self, model_dir=None):
        if model_dir is None:
            model_dir = os.getenv("BGE_MODEL_DIR")

        model = SentenceTransformer(model_dir)
        self.embedding_model = model
        self.content = ""
        self.embedding_dim = 0
        
    def get_embedding(self, content:str = ""):
        if len(content) <= 0 :
            raise Exception("content length must be equal 1")
        result = self.embedding_model.encode([content], normalize_embeddings=True).tolist()[0]
        self.content = content
        self.embedding_dim = len(result)
        return result

    def compare_v(cls, vector1: List[float], vector2: List[float]) -> float:
        # 计算两个向量的相似度
        dot_product = np.dot(vector1, vector2)
        magnitude = np.linalg.norm(vector1) * np.linalg.norm(vector2)
        if not magnitude:
            return 0
        return dot_product / magnitude
    
    def compare(self, text1: str, text2: str):
        # 计算两个文本的相似度
        embed1 = self.get_embedding(text1)
        embed2 = self.get_embedding(text2)
        return np.dot(embed1, embed2) / (np.linalg.norm(embed1) * np.linalg.norm(embed2))


