from flask import Flask, request, jsonify
from transformers import AutoModel, AutoTokenizer
import torch
app = Flask(__name__)


# Load the model and tokenizer once at import time (blocks until the
# weights are read from disk; every request then reuses these globals).
# NOTE(review): hard-coded local Windows path — consider an env var or CLI
# argument for portability.
model_name = 'E:/selfgit/GLM/embedding_model_exp/models/bge-large-zh-v1.5/'
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)


@app.route('/v1/embeddings', methods=['POST'])
def get_embedding():
    """OpenAI-compatible embeddings endpoint.

    Expects a JSON body ``{"input": str | list[str], "model": str}`` and
    returns an OpenAI-style embeddings response whose vectors are the
    attention-masked mean of the model's last hidden state.

    Returns:
        (json, 200) on success; (json {"error": ...}, 400) on any failure.
    """
    try:
        # silent=True: a missing/non-JSON body becomes {} instead of raising
        # an HTML 400 page, so the client always gets a JSON error payload.
        data = request.get_json(silent=True) or {}
        texts = data.get('input', [])

        # The OpenAI API accepts either a single string or a list of strings.
        if isinstance(texts, str):
            texts = [texts]
        param_model = data.get('model', '')

        # Tokenize once. The attention mask below also yields the token
        # counts, so we avoid a second tokenizer pass — the original second
        # pass ran WITHOUT truncation and could report counts that disagree
        # with what the model actually processed.
        inputs = tokenizer(texts, padding=True, truncation=True, max_length=512, return_tensors='pt')

        # Forward pass without gradient tracking (inference only).
        with torch.no_grad():
            outputs = model(**inputs)
            list_embeddings = (
                mean_pooling(outputs, inputs["attention_mask"])
                .cpu()
                .numpy()
                .tolist()
            )

        # Token usage derived from the same (truncated, padded) batch the
        # model saw: each 1 in the attention mask is one real token.
        total_tokens = int(inputs["attention_mask"].sum().item())

        response = {
            "object": "list",
            "data": [
                {
                    "object": "embedding",
                    "embedding": embedding,
                    "index": i
                }
                for i, embedding in enumerate(list_embeddings)
            ],
            "model": param_model,
            # OpenAI usage fields are integers, not per-item lists.
            "usage": {
                "prompt_tokens": total_tokens,
                "total_tokens": total_tokens
            }
        }

        return jsonify(response), 200
    except Exception as e:
        # Top-level boundary: report the failure to the client as a 400.
        return jsonify({'error': str(e)}), 400


def mean_pooling(model_output, attention_mask):
    """Mean-pool token embeddings into sentence embeddings, ignoring padding.

    Args:
        model_output: Model output object whose ``last_hidden_state`` has
            shape (batch, seq_len, hidden).
        attention_mask: (batch, seq_len) tensor with 1 for real tokens and
            0 for padding positions.

    Returns:
        (batch, hidden) tensor: per-sentence average over unmasked tokens.
    """
    token_embeddings = model_output.last_hidden_state
    mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    summed = torch.sum(token_embeddings * mask, 1)
    # Clamp prevents division by zero for an all-padding (empty) row.
    counts = torch.clamp(mask.sum(1), min=1e-9)
    return summed / counts
if __name__ == '__main__':
    # Flask development server; binds all interfaces on port 5000.
    app.run(host='0.0.0.0', port=5000)
