import os
from dashscope import MultiModalConversation
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_openai import ChatOpenAI
from langchain_ollama import OllamaEmbeddings, OllamaLLM

# --- DeepSeek (OpenAI-compatible cloud API + local Ollama tag) ---
DEEPSEEK_API_KEY_OS_VAR_NAME = "DEEPSEEK_API_KEY"
DEEPSEEK_URL = "https://api.deepseek.com/v1"
DEEPSEEK_CHAT_MODEL = "deepseek-chat"
DEEPSEEK_CHAT_LOCAL_MODEL = "deepseek-r1:1.5b"
DEEPSEEK_REASONER_MODEL = "deepseek-reasoner"

# --- Alibaba Tongyi Qianwen (Qwen) via DashScope compatible-mode endpoint ---
ALI_TONGYI_API_KEY_SYSVAR_NAME = "DASHSCOPE_API_KEY"
ALI_TONGYI_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
ALI_TONGYI_MAX_MODEL = "qwen-max-latest"
ALI_TONGYI_VL_MAX_MODEL = "qwen-vl-max"
ALI_TONGYI_AUDIO_MODEL = "qwen2-audio-instruct"
# Fixed: "text-6-embedding-v3" is not a valid DashScope model id; the
# embedding model family is text-embedding-v1/v2/v3.
ALI_TONGYI_EMBEDDING = "text-embedding-v3"


# langchain调用模型
def get_lc_model_client(api_key, base_url, model):
    """Build a LangChain ChatOpenAI client for any OpenAI-compatible endpoint.

    temperature is fixed at 0.7 for all clients created through this factory.
    """
    client_kwargs = {
        "api_key": api_key,
        "base_url": base_url,
        "model": model,
        "temperature": 0.7,
    }
    return ChatOpenAI(**client_kwargs)


# 通义千问文本模型
def get_ali_model_client():
    """Return a chat client for the Tongyi Qianwen (Qwen) text model."""
    dashscope_key = os.getenv(ALI_TONGYI_API_KEY_SYSVAR_NAME)
    return get_lc_model_client(dashscope_key, ALI_TONGYI_URL, ALI_TONGYI_MAX_MODEL)


# 通义千问视觉语言模型
def get_ali_vl_model_client():
    """Return a chat client for the Tongyi Qianwen vision-language model."""
    dashscope_key = os.getenv(ALI_TONGYI_API_KEY_SYSVAR_NAME)
    return get_lc_model_client(dashscope_key, ALI_TONGYI_URL, ALI_TONGYI_VL_MAX_MODEL)


# Tongyi Qianwen audio-language model
def call_ali_audio_model(messages):
    """Invoke the Qwen audio model once via DashScope's multi-modal API.

    Unlike the other helpers this performs the call directly instead of
    returning a client, because DashScope's audio interface is call-based.
    """
    dashscope_key = os.getenv(ALI_TONGYI_API_KEY_SYSVAR_NAME)
    return MultiModalConversation.call(
        model=ALI_TONGYI_AUDIO_MODEL,
        messages=messages,
        api_key=dashscope_key,
    )

# Tongyi Qianwen embedding model
def get_ali_embedding_model_client():
    """Return a DashScope-backed embedding client."""
    dashscope_key = os.getenv(ALI_TONGYI_API_KEY_SYSVAR_NAME)
    return DashScopeEmbeddings(
        model=ALI_TONGYI_EMBEDDING,
        dashscope_api_key=dashscope_key,
    )


# deepseek模型
def get_ds_model_client(model=DEEPSEEK_CHAT_MODEL):
    """Return a DeepSeek chat client (OpenAI-compatible cloud API).

    Pass ``model=DEEPSEEK_REASONER_MODEL`` to use the reasoning model.
    """
    ds_key = os.getenv(DEEPSEEK_API_KEY_OS_VAR_NAME)
    return get_lc_model_client(ds_key, DEEPSEEK_URL, model)


# 本地deepseek模型, ollama run deepseek-r1:1.5b
def get_ds_local_model_client(model=DEEPSEEK_CHAT_LOCAL_MODEL):
    """Return a locally-served DeepSeek model (start it with
    ``ollama run deepseek-r1:1.5b``); no API key required."""
    local_client = OllamaLLM(model=model)
    return local_client


# 本地嵌入模型
def get_ollama_embeddings_client(model_name='bge-m3'):
    """Return a local Ollama embedding client (defaults to the bge-m3 model)."""
    embedder = OllamaEmbeddings(model=model_name)
    return embedder


# 文本转向量
def get_ollama_embed_vector(text):
    """Embed *text* into a vector using the local Ollama embedding model."""
    return get_ollama_embeddings_client().embed_query(text)
