from openai import OpenAI
import os
from transformers import AutoTokenizer
from sentence_transformers import SentenceTransformer
import requests
import torch


class BaseEmbeddings:
    """Abstract base class for embedding providers."""

    def __init__(self):
        pass

    def get_embedding(self, text):
        """Return the embedding vector for *text*. Subclasses must override."""
        raise NotImplementedError("This method should be overridden by subclasses")

    @classmethod
    def cal_similarity(cls, embedding1, embedding2):
        """Return the cosine similarity of two equal-length numeric vectors.

        Returns:
            float in [-1, 1]; 0.0 when either vector has zero magnitude
            (the previous revision raised ZeroDivisionError in that case,
            including for empty inputs).

        Raises:
            ValueError: if the two vectors differ in length.
        """
        if len(embedding1) != len(embedding2):
            raise ValueError("Embeddings must be of the same length")
        dot = sum(a * b for a, b in zip(embedding1, embedding2))
        norm1 = sum(a ** 2 for a in embedding1) ** 0.5
        norm2 = sum(b ** 2 for b in embedding2) ** 0.5
        if norm1 == 0 or norm2 == 0:
            # A zero vector has no direction; define similarity as 0 rather
            # than dividing by zero.
            return 0.0
        return dot / (norm1 * norm2)

class OpenAIEmbeddings(BaseEmbeddings):
    """Embeddings backed by a remote API.

    Supported operators:
        'deepseek'    — OpenAI-compatible client against api.deepseek.com
        'baidu'       — OpenAI-compatible client against Baidu Qianfan
        'huggingface' — raw HTTP feature-extraction endpoint (BAAI/bge-large-zh)
    """

    def __init__(self, model_name='ernie-4.0-turbo-8k', operator='huggingface'):
        """
        Args:
            model_name: remote model identifier. NOTE(review): only stored,
                never sent — the Hugging Face URL pins its own model and the
                OpenAI clients are created without it; confirm intended usage.
            operator: backend selector, one of 'deepseek', 'baidu',
                'huggingface'.

        Raises:
            ValueError: if *operator* is not one of the supported backends
                (fail fast instead of failing later with an attribute error).
        """
        super().__init__()
        self.model_name = model_name
        self.operator = operator
        # SECURITY: credentials are read from the environment. The previous
        # revision hard-coded live API keys and a Hugging Face token in
        # source control; those must be rotated and never committed.
        if operator == 'deepseek':
            self.client = OpenAI(api_key=os.getenv("DEEPSEEK_API_KEY"),
                                base_url='https://api.deepseek.com')
        elif operator == 'baidu':
            self.client = OpenAI(api_key=os.getenv("QIANFAN_API_KEY"),
                                base_url='https://qianfan.baidubce.com/v2')
        elif operator == 'huggingface':
            self.API_URL = "https://router.huggingface.co/hf-inference/models/BAAI/bge-large-zh/pipeline/feature-extraction"
            self.headers = {
                "Authorization": f"Bearer {os.getenv('HF_API_TOKEN', '')}",
            }
        else:
            raise ValueError(f"Unsupported operator: {operator!r}")

    def query(self, payload):
        """POST *payload* to the Hugging Face endpoint and return parsed JSON.

        Raises:
            requests.HTTPError: on a non-2xx response, instead of silently
                parsing an error body as if it were embeddings.
        """
        response = requests.post(self.API_URL, headers=self.headers, json=payload)
        response.raise_for_status()
        return response.json()

    def pool_embedding(self, hidden_states, attention_mask=None, method='mean'):
        """Pool token-level hidden states into a single embedding.

        Args:
            hidden_states: token embeddings tensor. The masked/'cls'/'max'
                paths index as (batch, seq, dim); the unmasked 'mean' path
                averages over dim 0 — assumes an unbatched (seq, dim) tensor
                such as the HF feature-extraction output for one input.
            attention_mask: optional (batch, seq) mask of 1s for real tokens.
            method: 'mean', 'cls' (first token), or 'max'.

        Raises:
            ValueError: for an unknown *method*.
        """
        if method == "mean":
            if attention_mask is None:
                return hidden_states.mean(dim=0)
            # Masked mean: zero out padding positions, then divide by the
            # number of real tokens (clamped to avoid division by zero).
            mask_expanded = attention_mask.unsqueeze(-1).expand(hidden_states.size()).float()
            sum_hidden = torch.sum(hidden_states * mask_expanded, dim=1)
            sum_mask = mask_expanded.sum(dim=1).clamp(min=1e-9)
            return sum_hidden / sum_mask
        elif method == "cls":
            return hidden_states[:, 0, :]
        elif method == "max":
            if attention_mask is not None:
                # Push padding positions to -inf-like values so they never win.
                mask_expanded = attention_mask.unsqueeze(-1).expand(hidden_states.size()).float()
                hidden_states = hidden_states.masked_fill(mask_expanded == 0, -1e9)
            return hidden_states.max(dim=1).values
        else:
            raise ValueError(f"Unknown pooling method: {method}")

    def get_embedding(self, text):
        """Return a pooled embedding tensor for *text* via the configured backend.

        Raises:
            NotImplementedError: for operators other than 'huggingface'
                (the previous revision crashed with UnboundLocalError here).
        """
        if self.operator == 'huggingface':
            hidden_states = self.query({"inputs": text})
            return self.pool_embedding(torch.tensor(hidden_states))
        raise NotImplementedError(
            f"get_embedding is not implemented for operator {self.operator!r}"
        )
    
class LocalEmbeddings(BaseEmbeddings):
    """Embeddings computed locally with a sentence-transformers model."""

    def __init__(self, model_name_or_path='BAAI/bge-large-zh'):
        """
        Args:
            model_name_or_path: Hugging Face model id or local path handed to
                SentenceTransformer (downloads the model on first use).
        """
        super().__init__()
        self.model_name_or_path = model_name_or_path
        # NOTE: despite the name, this holds the full SentenceTransformer
        # model, not a tokenizer; the attribute is kept as `tokenizer` for
        # backward compatibility, with `model` as a clearer alias.
        self.tokenizer = SentenceTransformer(model_name_or_path=model_name_or_path)
        self.model = self.tokenizer

    def get_embedding(self, text):
        """Return an L2-normalized embedding for *text* (encode() output,
        a numpy array by default)."""
        return self.model.encode(text, normalize_embeddings=True)