from typing import Any, Callable, List, Optional

from langchain.embeddings import OpenAIEmbeddings
from langchain_openai import ChatOpenAI
from langchain_openai import OpenAI
from openai import OpenAI as baseOpenAI

# Local filesystem path to the Qwen2.5 0.5B instruct model weights.
qwen_model_path = r'D:\model\Qwen\Qwen2.5-0.5B-Instruct'
# Base URL of the locally hosted OpenAI-compatible inference server ("xin").
xin_base_url = 'http://localhost:5544/v1'
# Name of the model served at ``xin_base_url``.
xin_qwen_model='Qwen2.5-0.5B-Instruct'
# Default sampling temperature shared by the chat-model factories below.
TEMPERATURE = 0.7
# Lower-case alias of TEMPERATURE; kept because the factories below read it.
temperature = TEMPERATURE


def get_base_open_ai():
    """Return a raw ``openai`` SDK client pointed at the local "xin" server."""
    return baseOpenAI(api_key="API_SECRET_KEY", base_url=xin_base_url)

def get_chat_openai_by_biaoshu(
        max_tokens: int = None,
        streaming: bool = True,
        callbacks: Optional[List[Callable]] = None,
        verbose: bool = True,
        stop: str = None,
        **kwargs: Any,
) -> ChatOpenAI:
    """Build a ``ChatOpenAI`` client for the internal "biaoshu" qwen endpoint.

    Args:
        max_tokens: Maximum number of tokens to generate; ``None`` means the
            server default.
        streaming: Whether to stream tokens back as they are produced.
        callbacks: Optional LangChain callback handlers; defaults to an empty
            list.
        verbose: Whether LangChain should log verbosely.
        stop: Accepted for signature compatibility but currently NOT forwarded
            to the model. NOTE(review): confirm whether stop sequences should
            be passed through.
        **kwargs: Extra keyword arguments forwarded to ``ChatOpenAI``.

    Returns:
        A configured ``ChatOpenAI`` instance targeting the qwen1.5-32b server.
    """
    # Fix: the original used a mutable default ([]) that is shared across all
    # calls; normalize a None default to a fresh list instead.
    if callbacks is None:
        callbacks = []
    # Known deployments on this gateway; only the first is used here.
    LLM_MODELS = ["qwen1.5-32b", "qwen-72b", "qwen2.5-32b-instruct-awq-long", "deepseek-r1-distill-qwen-32b"]

    model_name = LLM_MODELS[0]
    config = {
        "api_base_url": "http://10.217.247.72:9050/yfzc/qwen1.5-32b/v1",
        "api_key": "EMPTY",
        # Opaque gateway token forwarded verbatim in the X-Server-Param header.
        "secret": "eyJhcHBpZCI6Inp0bmx0amNzIiwiY3NpZCI6Inp0bmx0amNzdGVzdDAwMDAwMDAwMDAwMDAwMDAwMDAwNmJkMjRlNDg1YzQzNDQ4ZDg2N2EzMTNkMWU2ZWI1MzAifQ=="
    }
    model = ChatOpenAI(
        streaming=streaming,
        verbose=verbose,
        callbacks=callbacks,
        openai_api_key=config.get("api_key", "EMPTY"),
        openai_api_base=config.get("api_base_url"),
        model_name=model_name,
        # Module-level default temperature (see TEMPERATURE above).
        temperature=temperature,
        max_tokens=max_tokens,
        # openai_proxy=config.get("openai_proxy"),
        default_headers={"X-Server-Param": config.get("secret")},
        **kwargs
    )
    return model



def get_chat_openai_xin(
        max_tokens: int = None,
        streaming: bool = True,
        callbacks: Optional[List[Callable]] = None,
        verbose: bool = True,
        stop: str = None,
        **kwargs: Any,
) -> ChatOpenAI:
    """Build a ``ChatOpenAI`` client for the local "xin" inference server.

    Args:
        max_tokens: Maximum number of tokens to generate; ``None`` means the
            server default.
        streaming: Whether to stream tokens back as they are produced.
        callbacks: Optional LangChain callback handlers; defaults to an empty
            list.
        verbose: Whether LangChain should log verbosely.
        stop: Accepted for signature compatibility but currently NOT forwarded
            to the model. NOTE(review): confirm whether stop sequences should
            be passed through.
        **kwargs: Extra keyword arguments forwarded to ``ChatOpenAI``.

    Returns:
        A configured ``ChatOpenAI`` instance targeting ``xin_base_url``.
    """
    # Fix: the original used a mutable default ([]) that is shared across all
    # calls; normalize a None default to a fresh list instead.
    if callbacks is None:
        callbacks = []
    config = {
        "api_base_url": xin_base_url,
        "api_key": "EMPTY",
    }
    # Consistency: reuse the module-level constant instead of re-hardcoding
    # the same "Qwen2.5-0.5B-Instruct" string.
    model_name = xin_qwen_model
    model = ChatOpenAI(
        streaming=streaming,
        verbose=verbose,
        callbacks=callbacks,
        openai_api_key=config.get("api_key", "EMPTY"),
        openai_api_base=config.get("api_base_url"),
        model_name=model_name,
        # Module-level default temperature (see TEMPERATURE above).
        temperature=temperature,
        max_tokens=max_tokens,
        # openai_proxy=config.get("openai_proxy"),
        # default_headers={"X-Server-Param": config.get("secret")},
        **kwargs
    )
    return model



def get_chat_openai_zhipu(
        max_tokens: int = None,
        streaming: bool = True,
        callbacks: Optional[List[Callable]] = None,
        verbose: bool = True,
        stop: str = None,
        **kwargs: Any,
) -> ChatOpenAI:
    """Build a ``ChatOpenAI`` client for Zhipu's glm-4-flash public API.

    Args:
        max_tokens: Maximum number of tokens to generate; ``None`` means the
            server default.
        streaming: Whether to stream tokens back as they are produced.
        callbacks: Optional LangChain callback handlers; defaults to an empty
            list.
        verbose: Whether LangChain should log verbosely.
        stop: Accepted for signature compatibility but currently NOT forwarded
            to the model. NOTE(review): confirm whether stop sequences should
            be passed through.
        **kwargs: Extra keyword arguments forwarded to ``ChatOpenAI``.

    Returns:
        A configured ``ChatOpenAI`` instance targeting the Zhipu endpoint.
    """
    # Fix: the original used a mutable default ([]) that is shared across all
    # calls; normalize a None default to a fresh list instead.
    if callbacks is None:
        callbacks = []
    config = {
        "api_base_url": "https://open.bigmodel.cn/api/paas/v4",
        # SECURITY: hard-coded API key committed to source — move to an
        # environment variable / secret store and rotate this credential.
        "api_key": "e440104944fe4defabccd2620e5dff6c.xj8mlkvxPdr0HYff",
    }
    model_name = "glm-4-flash"
    model = ChatOpenAI(
        streaming=streaming,
        verbose=verbose,
        callbacks=callbacks,
        openai_api_key=config.get("api_key", "EMPTY"),
        openai_api_base=config.get("api_base_url"),
        model_name=model_name,
        # Module-level default temperature (see TEMPERATURE above).
        temperature=temperature,
        max_tokens=max_tokens,
        # openai_proxy=config.get("openai_proxy"),
        # default_headers={"X-Server-Param": config.get("secret")},
        **kwargs
    )
    return model

def get_chat_openai_zhipu_flash_250414 (
        max_tokens: int = None,
        streaming: bool = True,
        callbacks: Optional[List[Callable]] = None,
        verbose: bool = True,
        temperature: float = 0.7,
        stop: str = None,
        **kwargs: Any,
) -> ChatOpenAI:
    """Build a ``ChatOpenAI`` client for Zhipu's glm-4-flash-250414 model.

    Unlike the other Zhipu factory, this one exposes ``temperature`` as a
    parameter instead of reading the module-level default.

    Args:
        max_tokens: Maximum number of tokens to generate; ``None`` means the
            server default.
        streaming: Whether to stream tokens back as they are produced.
        callbacks: Optional LangChain callback handlers; defaults to an empty
            list.
        verbose: Whether LangChain should log verbosely.
        temperature: Sampling temperature for the model.
        stop: Accepted for signature compatibility but currently NOT forwarded
            to the model. NOTE(review): confirm whether stop sequences should
            be passed through.
        **kwargs: Extra keyword arguments forwarded to ``ChatOpenAI``.

    Returns:
        A configured ``ChatOpenAI`` instance targeting the Zhipu endpoint.
    """
    # Fix: the original used a mutable default ([]) that is shared across all
    # calls; normalize a None default to a fresh list instead.
    if callbacks is None:
        callbacks = []
    config = {
        "api_base_url": "https://open.bigmodel.cn/api/paas/v4",
        # SECURITY: hard-coded API key committed to source — move to an
        # environment variable / secret store and rotate this credential.
        "api_key": "e440104944fe4defabccd2620e5dff6c.xj8mlkvxPdr0HYff",
    }
    model_name = "glm-4-flash-250414"
    model = ChatOpenAI(
        streaming=streaming,
        verbose=verbose,
        callbacks=callbacks,
        openai_api_key=config.get("api_key", "EMPTY"),
        openai_api_base=config.get("api_base_url"),
        model_name=model_name,
        temperature=temperature,
        max_tokens=max_tokens,
        # openai_proxy=config.get("openai_proxy"),
        # default_headers={"X-Server-Param": config.get("secret")},
        **kwargs
    )
    return model




def get_chat_openai_common (
        max_tokens: int = None,
        api_base_url: str = None,
        api_key: str = None,
        model_name: str = None,
        secret: str = None,
        streaming: bool = True,
        callbacks: Optional[List[Callable]] = None,
        verbose: bool = True,
        temperature: float = 0.7,
        stop: str = None,
        **kwargs: Any,
) -> ChatOpenAI:
    """Build a ``ChatOpenAI`` client from fully caller-supplied connection info.

    Generic counterpart of the endpoint-specific factories above: the base
    URL, key, model name and gateway secret are all parameters.

    Args:
        max_tokens: Maximum number of tokens to generate; ``None`` means the
            server default.
        api_base_url: OpenAI-compatible endpoint base URL.
        api_key: API key for the endpoint.
        model_name: Name of the model to request.
        secret: Opaque gateway token sent verbatim in the X-Server-Param
            header (may be ``None``, in which case the header value is None).
        streaming: Whether to stream tokens back as they are produced.
        callbacks: Optional LangChain callback handlers; defaults to an empty
            list.
        verbose: Whether LangChain should log verbosely.
        temperature: Sampling temperature for the model.
        stop: Accepted for signature compatibility but currently NOT forwarded
            to the model. NOTE(review): confirm whether stop sequences should
            be passed through.
        **kwargs: Extra keyword arguments forwarded to ``ChatOpenAI``.

    Returns:
        A configured ``ChatOpenAI`` instance.
    """
    # Fix: the original used a mutable default ([]) that is shared across all
    # calls; normalize a None default to a fresh list instead.
    if callbacks is None:
        callbacks = []
    model = ChatOpenAI(
        streaming=streaming,
        verbose=verbose,
        callbacks=callbacks,
        api_key=api_key,
        base_url=api_base_url,
        model=model_name,
        temperature=temperature,
        max_tokens=max_tokens,
        # openai_proxy=config.get("openai_proxy"),
        default_headers={"X-Server-Param": secret},
        **kwargs
    )
    return model

