#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# @Project : RAG-demo
# @File    : embedding_rerank_util.py
# @IDE     : PyCharm
# @Author  :ZH
# @Time    : 2025/6/30 15:51
from typing import Union, List
from util.async_http_client_util import async_http_client
from langchain_openai import OpenAIEmbeddings
from util.logger_setup_util import LoggerSetup

logger = LoggerSetup(__name__).get_logger()


async def bce_embedding_server(content: Union[str, list], **kwargs) -> List[dict]:
    """
    Call the BCE embedding service and return one vector per input text.

    :param content: A single text string, or a list of text strings.
    :param kwargs: Optional overrides:
        - model: embedding model name (default "bce-embedding-base_v1")
        - base_url: embedding service endpoint (default internal host)
        - api_key: API key for the service
        - batch_size: texts per request (default 10; values < 1 are clamped to 1)
    :return: List of dicts, one per input text, in input order:
             [{"content": <text>, "embedding": <vector>}, ...]
    """
    # NOTE(review): the endpoint and API key were hard-coded here; they are now
    # overridable via kwargs, but the defaults still embed a secret in source —
    # prefer loading them from configuration/environment.
    embedding_server = OpenAIEmbeddings(
        model=kwargs.get("model", "bce-embedding-base_v1"),
        base_url=kwargs.get("base_url", 'http://10.245.130.4:19997/v1'),
        openai_api_key=kwargs.get("api_key", "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"),
    )
    if isinstance(content, str):
        content = [content]  # normalize to a list so batching below is uniform
    # Split the input into small batches to avoid sending huge request bodies.
    batch_size = max(1, int(kwargs.get("batch_size", 10)))
    batches = [content[i:i + batch_size] for i in range(0, len(content), batch_size)]
    embeddings: list = []
    for index, batch in enumerate(batches):
        # Each batch is a non-empty slice, so no emptiness check is needed.
        embeddings += await embedding_server.aembed_documents(batch)
        logger.info(f"Batch {index + 1}/{len(batches)} processed, current batch size: {len(batch)}")
    # aembed_documents preserves order, so zip pairs each text with its vector.
    return [
        {"content": text, "embedding": vector}
        for text, vector in zip(content, embeddings)
    ]


async def bce_rerank_server(query: str, documents: list, **kwargs) -> list:
    """
    Call the BCE rerank service to score documents against a query.

    :param query: Query text.
    :param documents: List of candidate document texts to rerank.
    :param kwargs: Optional overrides:
        - url: rerank endpoint (default internal host)
        - model: rerank model name (default "bce-reranker-base_v1")
    :return: The service's "results" list (typically document index + relevance
             score per entry), or [] when there is nothing to rerank or the
             response contains no "results" key.
    """
    # Nothing to rerank — skip the network round-trip entirely.
    if not documents:
        return []
    # NOTE(review): endpoint was hard-coded; now overridable via kwargs but the
    # default should ideally come from configuration.
    response = await async_http_client.post(
        url=kwargs.get("url", 'http://10.245.130.4:19997/v1/rerank'),
        headers={"accept": "application/json", "Content-Type": "application/json"},
        json={
            "query": query,
            "documents": documents,
            "model": kwargs.get("model", "bce-reranker-base_v1")
        }
    )
    return response.get('results', [])
