

from langchain_community.embeddings import XinferenceEmbeddings
from openai import OpenAI
from langchain_openai import ChatOpenAI
from xinference.client import Client
from dotenv import load_dotenv
import os
from utils.logger_config import LoggerManager
logger = LoggerManager().logger

# Directory containing this source file; used so the config path resolves
# relative to the source tree instead of the process's working directory.
current_dir = os.path.dirname(__file__)

# Build the relative path to conf/.qwen (one level above this file).
conf_file_path_qwen = os.path.join(current_dir, '..', 'conf', '.qwen')

# Load Qwen-related environment variables (MODEL_SERVICE_TYPE, MODEL_SERVER_URL,
# CHAT_MODEL_ID, EMBED_MODEL_ID, API_KEY) consumed by the getters below.
load_dotenv(dotenv_path=conf_file_path_qwen)


# 模型调用服务
def get_qwen_models():
    """Return the (llm, chat, embed) model triple for the configured backend.

    The backend is selected by the ``MODEL_SERVICE_TYPE`` environment
    variable: ``"online"`` uses the DashScope cloud platform, ``"deploy"``
    uses a self-hosted OpenAI-compatible endpoint.

    Raises:
        ValueError: if ``MODEL_SERVICE_TYPE`` is unset or unrecognized.
    """
    service_type = os.getenv("MODEL_SERVICE_TYPE")

    # Guard-clause dispatch: each known backend returns immediately.
    if service_type == "online":
        return get_online_qwen_models()
    if service_type == "deploy":
        return get_deploy_qwen_models()

    raise ValueError("Invalid model service type. Please set MODEL_SERVICE_TYPE to 'online' or 'deploy'.")


def get_online_qwen_models():
    """Build the Qwen model triple backed by the DashScope cloud platform.

    Returns:
        tuple: ``(llm, chat, embed)`` — a Tongyi completion model, a
        ChatTongyi chat model, and a DashScope embedding model.
    """
    # Imports are function-local so the DashScope stack is only required
    # when the "online" backend is actually selected.
    from langchain_community.llms.tongyi import Tongyi
    from langchain_community.chat_models import ChatTongyi
    from langchain_community.embeddings import DashScopeEmbeddings

    # Plain-completion model: slightly higher temperature/top_p than chat.
    llm = Tongyi(model="qwen-turbo", temperature=0.1, top_p=0.7, max_tokens=1024)

    # Chat model: near-deterministic sampling settings.
    chat = ChatTongyi(model="qwen-turbo", temperature=0.01, top_p=0.2, max_tokens=1024)

    # Embedding model.
    embed = DashScopeEmbeddings(model="text-embedding-v3")

    return llm, chat, embed


def get_deploy_qwen_models():
    """Build the Qwen model triple against a self-deployed OpenAI-compatible server.

    Connection settings are read from environment variables:
    ``MODEL_SERVER_URL`` (base URL), ``CHAT_MODEL_ID`` (chat/completion model id)
    and ``API_KEY``. The embedding model is currently hard-coded to DashScope
    ``text-embedding-v3`` (the Xinference path was retired).

    Returns:
        tuple: ``(llm, chat, embed)`` — two ChatOpenAI clients with different
        sampling settings plus a DashScope embedding model.
    """
    model_server_url = os.getenv("MODEL_SERVER_URL")
    chat_model_id = os.getenv("CHAT_MODEL_ID")
    api_key = os.getenv("API_KEY")

    # SECURITY: never write the raw API key to the log — only record whether
    # one is configured.
    logger.info(
        f"get_chat_model -> model_server_url:{model_server_url},"
        f"chat_model_id:{chat_model_id},"
        f"api_key:{'***set***' if api_key else None}"
    )

    def _make_chat(temperature, top_p):
        """Build one ChatOpenAI client against the deployed server."""
        return ChatOpenAI(
            base_url=model_server_url,
            api_key=api_key,
            model=chat_model_id,
            max_tokens=1024,
            temperature=temperature,
            top_p=top_p,
            # Required for Qwen3 on vLLM/ModelScope: disable "thinking" mode,
            # otherwise the server rejects the request.
            extra_body={"enable_thinking": False},
        )

    # "llm" keeps looser sampling; "chat" is near-deterministic.
    llm = _make_chat(temperature=0.5, top_p=0.7)
    chat = _make_chat(temperature=0.01, top_p=0.2)

    # Embedding model (DashScope; alternatives: text-embedding-v3 / v4).
    from langchain_community.embeddings import DashScopeEmbeddings
    embed = DashScopeEmbeddings(model="text-embedding-v3")

    return llm, chat, embed