from langchain_openai import ChatOpenAI
from xinference.client import Client
# from dotenv import load_dotenv
import os
# load_dotenv()
# SECURITY: a real-looking API key is committed to source control — rotate this
# key and load it from the environment (e.g. via python-dotenv) instead.
# setdefault (rather than plain assignment) lets a value already present in the
# environment take precedence over this in-source fallback.
os.environ.setdefault("DASHSCOPE_API_KEY", "sk-42593e24dba7402ab1415dc68531bd86")

# llm
from langchain_community.chat_models import ChatTongyi


def get_llm():
    """Return the default chat model: Alibaba Cloud's qwen-max via Tongyi.

    Low temperature/top_p make the output mostly deterministic, which suits
    tool-use and structured-answer workloads.
    """
    return ChatTongyi(model="qwen-max", temperature=0.1, top_p=0.3)

def get_qwen():
    """Return the QwQ reasoning model (qwq-32b-preview) via Tongyi.

    Uses the same low-randomness sampling settings as `get_llm`.
    """
    return ChatTongyi(model="qwq-32b-preview", temperature=0.1, top_p=0.3)


def get_my_qwq(
    base_url: str = "https://cheerful-verve-5616-8080.east4.casdao.com/v1",
    api_key: str = "abc123",
    model: str = "Qwen2.5-7B-Instruct",
):
    """Connect to a self-hosted LLM exposed behind an OpenAI-compatible API.

    The endpoint, key, and model name were previously hard-coded; they are now
    keyword parameters with the original values as defaults, so existing
    callers (`get_my_qwq()`) are unaffected while other deployments can be
    targeted without editing this module.

    Args:
        base_url: Base URL of the OpenAI-compatible server (must include /v1).
        api_key: API key expected by the server (placeholder for self-hosted).
        model: Model identifier as registered on the server.

    Returns:
        A configured ``ChatOpenAI`` client instance.
    """
    return ChatOpenAI(base_url=base_url, api_key=api_key, model=model)

def get_asr(
    endpoint: str = "https://positive-galleria-9659-8080.east4.casdao.com",
    model_name: str = "SenseVoiceSmall",
):
    """Connect to a self-hosted speech-recognition (ASR) model via Xinference.

    The endpoint and model name were previously hard-coded; they are now
    keyword parameters with the original values as defaults, keeping existing
    callers (`get_asr()`) working unchanged.

    Args:
        endpoint: Base URL of the Xinference server.
        model_name: Name/UID of the deployed ASR model to fetch.

    Returns:
        The model handle returned by ``Client.get_model`` (performs a network
        call to the server).
    """
    client = Client(endpoint)
    return client.get_model(model_name)
