import os

from chromadb.utils.embedding_functions import SentenceTransformerEmbeddingFunction
from langchain_ollama import ChatOllama

from app.config.logging import logger
from app.decorator.timeit import timeit

# Ollama-related configuration
ollama_url = "http://43.163.104.91:11232"
# Alternative chat models tried previously: deepseek-r1:1.5b / tinyllama:latest
ollama_chat_model_name = "qwen2.5:3b"
# NOTE(review): despite the "_url" suffix this holds an Ollama MODEL NAME,
# not a URL — it is passed as model_name to OllamaEmbeddingFunction below.
ollama_embedding_model_url = "bge-m3:latest"


@timeit
def init_embedding_model():
    """Build the embedding function used by the app.

    Prefers the locally fine-tuned sentence-transformers model because it
    avoids a network round-trip per call and is therefore fast.

    Slower alternative (one HTTP request per embedding call) — the model
    deployed on Ollama:
        OllamaEmbeddingFunction(url=ollama_url, model_name=ollama_embedding_model_url)
    """
    local_model_kwargs = {
        "model_name": "./transformers/fin-bge-m3",
        "normalize_embeddings": True,
        "local_files_only": True,
    }
    return SentenceTransformerEmbeddingFunction(**local_model_kwargs)


@timeit
def init_chat_model():
    """Create the chat-model client backed by the remote Ollama server.

    validate_model_on_init=True makes construction fail fast if the model
    is not available on the server; keep_alive keeps the model resident
    between requests.
    """
    return ChatOllama(
        model=ollama_chat_model_name,
        base_url=ollama_url,
        temperature=0.8,
        num_predict=256,
        validate_model_on_init=True,
        keep_alive=3600,
    )


# Embedding function (module-level singleton)
bge_embedding_fun = init_embedding_model()

# Chat model (module-level singleton)
chat_model = init_chat_model()

# Warm up the chat model so the first real request doesn't pay the load cost.
# NOTE(review): previously this invoke ran unguarded at import time, so a
# transient Ollama outage crashed the whole importing application. Warm-up is
# best-effort by design, so failure is now logged and swallowed; `result`
# stays defined either way.
try:
    result = chat_model.invoke(input='hello')
    logger.info(f"chat model pre hot.result: {result}")
except Exception:
    result = None
    logger.exception("chat model warm-up failed; continuing without pre-warm")
