import os

from langchain_community.chat_models.tongyi import ChatTongyi
from langchain_community.embeddings.dashscope import DashScopeEmbeddings
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

from config import EMBEDDINGMODEL_URL, LLM_URL

# API keys for the locally hosted OpenAI-compatible endpoints.  The local
# servers behind EMBEDDINGMODEL_URL / LLM_URL presumably do not validate the
# key (the original hard-coded a placeholder), so the same placeholder remains
# the fallback — but never paste a real secret here; set it via the
# environment instead.
_EMBEDDING_API_KEY = os.environ.get("EMBEDDING_API_KEY", "sk-xxxx")
_LLM_API_KEY = os.environ.get("LLM_API_KEY", "sk-xxxx")

# Local embedding model "conan", served through an OpenAI-compatible API.
embedding_model = OpenAIEmbeddings(
    model="conan",
    openai_api_base=EMBEDDINGMODEL_URL,
    openai_api_key=_EMBEDDING_API_KEY,
)

# Local chat model qwen2.5-72b-instruct-awq, exposed under the name "qwen".
# temperature=0 for deterministic output; verbose=True enables request logging.
tongyi_model = ChatOpenAI(
    model_name="qwen",
    openai_api_base=LLM_URL,
    openai_api_key=_LLM_API_KEY,
    temperature=0,
    verbose=True,
)

# Alternative backends kept for reference:
# tongyi_model = ChatOpenAI(model_name="/root/qwen2_5_72b_gptq_int4", openai_api_base=f"http://192.168.10.126:8000/v1", openai_api_key="sk-xxxx", temperature=0, verbose=True)
# embedding_model = DashScopeEmbeddings(model="text-embedding-v1")
# tongyi_model = ChatTongyi(model="qwen2.5-72b-instruct", model_kwargs={"temperature": 0})

# Manual smoke test: exercise both models when this module is run directly.
if __name__ == "__main__":
    embedding_vectors = embedding_model.embed_documents(["你好"])
    print(embedding_vectors)
    chat_reply = tongyi_model.invoke("你好，你是谁")
    print(chat_reply)