from langchain.embeddings.base import Embeddings
from transformers import AutoModel, AutoTokenizer
import torch
import numpy as np
from typing import List


class ModelScopeEmbeddings(Embeddings):
    """LangChain-compatible embeddings backed by a local HuggingFace model.

    Loads a tokenizer and encoder from ``model_path`` and produces
    L2-normalized, attention-mask-aware mean-pooled sentence embeddings.
    """

    def __init__(self, model_path, device='cpu'):
        """
        Args:
            model_path: Local path (or hub id) passed to ``from_pretrained``.
            device: Torch device string, e.g. ``'cpu'`` or ``'cuda'``.
        """
        self.model_path = model_path
        self.device = device
        self.load_model()

    def load_model(self):
        """Load the tokenizer and model onto the configured device in eval mode."""
        print(f"正在加载模型: {self.model_path}")
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)
        self.model = AutoModel.from_pretrained(self.model_path).to(self.device)
        # Inference only — disable dropout etc.
        self.model.eval()
        print("模型加载完成")

    def embed_documents(self, texts: List[str], batch_size: int = 32) -> List[List[float]]:
        """Embed a list of documents.

        Texts are tokenized and run through the model in batches rather than
        one forward pass per text, which is substantially faster. Padding
        tokens are excluded by the masked mean pooling, so batched results
        match the single-text path.

        Args:
            texts: Documents to embed; an empty list yields an empty list.
            batch_size: Number of texts per forward pass (new, optional;
                default keeps the call signature backward-compatible).

        Returns:
            One embedding (list of floats) per input text, in order.
        """
        embeddings: List[List[float]] = []
        for start in range(0, len(texts), batch_size):
            batch = texts[start:start + batch_size]
            embeddings.extend(vec.tolist() for vec in self._embed_batch(batch))
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query string and return it as a list of floats."""
        return self._embed_text(text).tolist()

    def _embed_text(self, text: str):
        """Embed one text; returns a 1-D numpy array."""
        return self._embed_batch([text])[0]

    def _embed_batch(self, texts: List[str]):
        """Run one batched forward pass; returns a 2-D numpy array (n, dim)."""
        # Tokenize the whole batch together; pad to the longest text and
        # truncate to the model's usual 512-token limit.
        inputs = self.tokenizer(
            texts,
            padding=True,
            truncation=True,
            return_tensors="pt",
            max_length=512
        ).to(self.device)

        with torch.no_grad():
            outputs = self.model(**inputs)
            # Masked mean pooling over tokens, then unit-normalize so
            # dot products are cosine similarities.
            pooled = self.mean_pooling(outputs, inputs['attention_mask'])
            pooled = torch.nn.functional.normalize(pooled, p=2, dim=1)

        return pooled.cpu().numpy()

    def mean_pooling(self, model_output, attention_mask):
        """Mean of token embeddings weighted by the attention mask.

        Padding positions (mask == 0) contribute nothing; the clamp guards
        against division by zero for an all-padding row.
        """
        token_embeddings = model_output.last_hidden_state
        input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
        return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

# Demo: use the custom embedding class.
local_model_path = "/Users/brightzhou/.cache/modelscope/hub/models/sentence-transformers/all-MiniLM-L6-v2"

if __name__ == "__main__":
    # Guarded so that importing this module does not trigger the expensive
    # model load and inference as a side effect.
    custom_embeddings = ModelScopeEmbeddings(local_model_path, device='cpu')

    # Smoke test
    texts = ["This is a test", "Another example"]
    embeddings = custom_embeddings.embed_documents(texts)
    print(f"自定义嵌入生成完成: {len(embeddings)}x{len(embeddings[0])}")
