import os

from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.llms.sparkllm import SparkLLM
from langchain_community.llms import Ollama
from langchain_openai import ChatOpenAI
import jwt
import time
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_community.chat_models.tongyi import ChatTongyi
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_community.embeddings import SparkLLMTextEmbeddings
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.llms.moonshot import Moonshot



# NOTE(review): hardcoded credential checked into source — move to an
# environment variable or secret store and rotate this key.
zhipuai_api_key = "4b60409403162df4e015cdd99af1d803.aQEEaBQBpY6AXqPL"

def generate_token(apikey: str, exp_seconds: int) -> str:
    """Build a Zhipu AI (GLM) API JWT from an ``id.secret`` API key.

    Args:
        apikey: Zhipu API key of the form ``"<id>.<secret>"``.
        exp_seconds: token lifetime in seconds from now.

    Returns:
        The encoded JWT, signed with HS256 and Zhipu's ``sign_type`` header.

    Raises:
        Exception: if ``apikey`` cannot be split into exactly id and secret.
    """
    try:
        key_id, secret = apikey.split(".")
    except (ValueError, AttributeError) as e:
        # Narrowed from a bare ``except Exception`` — only malformed keys land here.
        raise Exception("invalid apikey", e)

    # Zhipu expects millisecond timestamps. Read the clock once so that
    # ``exp`` and ``timestamp`` are consistent with each other.
    now_ms = int(round(time.time() * 1000))
    payload = {
        "api_key": key_id,
        "exp": now_ms + exp_seconds * 1000,
        "timestamp": now_ms,
    }

    return jwt.encode(
        payload,
        secret,
        algorithm="HS256",
        headers={"alg": "HS256", "sign_type": "SIGN"},
    )

# Zhipu GLM-4 via its OpenAI-compatible endpoint. The API key is a JWT
# minted once at import time, valid for 864000 s (10 days).
zhipu_llm = ChatOpenAI(
    openai_api_base="https://open.bigmodel.cn/api/paas/v4",
    openai_api_key=generate_token(zhipuai_api_key, 864000),
    model_name="glm-4",
    verbose=True,
    streaming=False,
)
# Spark "lite" behind a LAN OpenAI-compatible proxy.
# NOTE(review): this binding is overwritten later in the file by
# ``xinghuo_llm = SparkLLM(...)``, so this client is effectively dead code
# unless that later assignment is removed.
xinghuo_llm = ChatOpenAI(
    model_name="spark-lite",
    openai_api_base="http://10.10.50.72:9090/v1",
    # The proxy ignores the key; pass a placeholder *string* — the client
    # expects a str API key, not the bare int 123.
    openai_api_key="123",
    streaming=False,
    verbose=True,
)

# Local OpenAI-compatible server (e.g. LM Studio) on port 1234.
local_llm = ChatOpenAI(
    model_name="glm-4",
    openai_api_base="http://localhost:1234/v1",
    # Local servers ignore the key; use a placeholder *string* — the client
    # expects a str API key, not the bare int 123.
    openai_api_key="123",
    streaming=False,
    verbose=True,
)

# InternLM2 via the Puyu OpenAI-compatible endpoint.
# NOTE(review): a long-lived bearer token is hardcoded here — rotate it and
# load it from the environment instead of source control.
sspy_llm = ChatOpenAI(
    openai_api_base="https://internlm-chat.intern-ai.org.cn/puyu/api/v1",
    openai_api_key="eyJ0eXBlIjoiSldUIiwiYWxnIjoiSFM1MTIifQ.eyJqdGkiOiI1MDIxMTAwOSIsInJvbCI6IlJPTEVfUkVHSVNURVIiLCJpc3MiOiJPcGVuWExhYiIsImlhdCI6MTcyMDc1NDQwMSwiY2xpZW50SWQiOiJlYm1ydm9kNnlvMG5semFlazF5cCIsInBob25lIjoiMTgyNTQxNjE2OTEiLCJ1dWlkIjoiNDlhZmU2MTYtNDIzZC00NTU3LWE4MzctZDFlYzRhODQ5OWM1IiwiZW1haWwiOiIiLCJleHAiOjE3MzYzMDY0MDF9.zCMSGN7nVyg2PZ3Wv9SVhj60-I3W5mhQETprCa9dKryACxhS-LX9zNEEuDOVErNMkpsG6LefIQsXQpXMjY-PSg",
    model_name="internlm2-latest",
    verbose=True,
    streaming=False,
)


# iFLYTEK Spark credentials consumed from the environment by ``SparkLLM``.
# NOTE(review): these same three variables are assigned AGAIN later in this
# file with the API_KEY and API_SECRET values swapped — confirm against the
# iFLYTEK console which ordering is correct and delete the duplicate block.
os.environ["IFLYTEK_SPARK_APP_ID"] = "849d9562"
os.environ["IFLYTEK_SPARK_API_KEY"] = "ZmI0NGE5ZGUwNzQwMGZjYWFiYjg4MTIx"
os.environ["IFLYTEK_SPARK_API_SECRET"] = "e33637f01cebee4a4675eb479d77df12"
#xinghuo_llm = SparkLLM()



# Alibaba DashScope (Tongyi Qianwen). NOTE(review): the same hardcoded key
# appears both here and as an explicit kwarg below — keep a single source.
os.environ["DASHSCOPE_API_KEY"] = "sk-c78e46dbb3824fd2bb26c3bcd4bbc2a6"

tongyi_llm = ChatTongyi(temperature=0.8)

tongyi_embeddings = DashScopeEmbeddings(
    dashscope_api_key="sk-c78e46dbb3824fd2bb26c3bcd4bbc2a6",
    model="text-embedding-v1",
)

# Qwen via DashScope's OpenAI-compatible endpoint.
tongyi_llm_openai = ChatOpenAI(
    openai_api_base="https://dashscope.aliyuncs.com/compatible-mode/v1",
    openai_api_key="sk-c78e46dbb3824fd2bb26c3bcd4bbc2a6",
    model_name="qwen2-1.5b-instruct",
    verbose=True,
    streaming=False,
)




# iFLYTEK Spark, configured through the environment for ``SparkLLM``.
# NOTE(review): compared with the earlier assignment of these same three
# variables, the API_KEY and API_SECRET values are SWAPPED here — one of the
# two blocks is wrong; verify against the iFLYTEK console and keep one copy.
os.environ["IFLYTEK_SPARK_APP_ID"] = "849d9562"
os.environ["IFLYTEK_SPARK_API_KEY"] = "e33637f01cebee4a4675eb479d77df12"
os.environ["IFLYTEK_SPARK_API_SECRET"] = "ZmI0NGE5ZGUwNzQwMGZjYWFiYjg4MTIx"
# This rebinding silently replaces the ChatOpenAI-based ``xinghuo_llm``
# defined earlier in the file.
xinghuo_llm = SparkLLM(temperature=0.8)

# Spark text embeddings with explicit (hardcoded) credentials; same
# key/secret ordering question as the environment block above.
xinhuo_embeddings = SparkLLMTextEmbeddings(
    spark_app_id="849d9562",
    spark_api_key="e33637f01cebee4a4675eb479d77df12",
    spark_api_secret="ZmI0NGE5ZGUwNzQwMGZjYWFiYjg4MTIx",
)

# Spark "generalv3.5" routed through an OpenAI-compatible gateway
# (same host and key as the one-api client further down).
xinghuo_llm_openai = ChatOpenAI(
    openai_api_key="sk-z6U3P4mL6ntm39mz92AfAe5f4167419a86D4971fD8A36c9a",
    openai_api_base="http://60.204.226.75:32100/v1",
    model_name="generalv3.5",
)



# Ollama-hosted qwen2; streams generated tokens straight to stdout via the
# streaming callback handler.
ollama_llm = Ollama(
    base_url="http://60.204.226.75:11434",
    model="qwen2",
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)

# Embedding client pointed at the same Ollama server and model.
ollama_emb = OllamaEmbeddings(
    base_url="http://60.204.226.75:11434",
    model="qwen2",
)


# Generic client for the gateway on 60.204.226.75, speaking the OpenAI
# protocol to a qwen2 backend.
one_api_llm = ChatOpenAI(
    openai_api_key="sk-z6U3P4mL6ntm39mz92AfAe5f4167419a86D4971fD8A36c9a",
    openai_api_base="http://60.204.226.75:32100/v1",
    model_name="qwen2",
    streaming=False,
)




# Moonshot (Kimi); ``Moonshot()`` picks up MOONSHOT_API_KEY from the
# environment. NOTE(review): hardcoded credential — rotate it and load it
# from a secret store instead of source control.
os.environ["MOONSHOT_API_KEY"] = "sk-RJgLy6YVPfRDBr7JlbYB9LjiI8L7NGhvcdkMkI8YYtLuiOAj"
kimi_llm = Moonshot()


# Kimi through Moonshot's OpenAI-compatible REST endpoint.
kimi_llm_openai = ChatOpenAI(
    openai_api_base="https://api.moonshot.cn/v1",
    openai_api_key="sk-RJgLy6YVPfRDBr7JlbYB9LjiI8L7NGhvcdkMkI8YYtLuiOAj",
    model_name="moonshot-v1-8k",
    verbose=True,
    streaming=False,
)


if __name__ == "__main__":
    # Smoke test: send one question to the gateway-backed Spark client and
    # dump the raw response object.
    prompt = HumanMessage(content="深圳2008年的GDP多少亿")
    response = xinghuo_llm_openai.invoke([prompt])
    print(response)





