from typing import List

import requests

# Endpoint of the locally deployed Ollama bge-m3 embedding model.
OLLAMA_API_URL='http://localhost:11435/api/v1/models/bge-m3/embedding'
# 调用本地ollama部署的bge-m3模型实现向量化
def _call_ollama_embedding_api(texts: List[str]) -> List[List[float]]:
        embddings=[]
        for text in texts:
            # 去除过长文本，当前的编码模型支持最大8000token,
            truncated_text=text.strip()[:5000]
            if not truncated_text:
                truncated_text = '空内容'
            
            try:
                 response=requests.post(
                      OLLAMA_API_URL,
                      json={"input": truncated_text}
                 )

                 if response.status_code == 200:
                      embedding=response.json.get('embedding',[])
                      embddings.append(embedding)
            except Exception as e:
                 print(f'调用嵌入模型出错：{e}')
                 dummy=[0.0]*1024
                 embddings.append(dummy)