# 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
# SECURITY NOTE(review): this file hardcodes live-looking API keys (LangSmith,
# OpenAI-compatible, DashScope). Move them to a .env file or secret manager and
# rotate any key that has been committed to version control.
'''
export LANGSMITH_TRACING=true
export LANGSMITH_ENDPOINT=https://api.smith.langchain.com
export LANGSMITH_API_KEY=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
export LANGSMITH_PROJECT=pr-potable-worth-13

export OPENAI_BASE_URL=http://jeniya.top/v1/responses
export OPENAI_API_KEY=sk-0cNrlS1Wb6wZKol9e3l6NLMcmyKSpjdkoTr8pFRUIQxdXa5E
'''

def load_env():
    """Populate the process environment with LangSmith / OpenAI / DashScope settings.

    SECURITY NOTE(review): API keys are hardcoded here; they should be loaded
    from a .env file or a secret store instead of being committed in source.
    """
    import os

    settings = {
        # LangSmith tracing configuration
        'LANGSMITH_TRACING': 'true',
        'LANGSMITH_ENDPOINT': 'https://api.smith.langchain.com',
        'LANGSMITH_API_KEY': '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',
        'LANGSMITH_PROJECT': 'pr-potable-worth-13',
        # OpenAI-compatible endpoint
        'OPENAI_BASE_URL': 'http://jeniya.top/v1',
        'OPENAI_API_KEY': 'sk-0cNrlS1Wb6wZKol9e3l6NLMcmyKSpjdkoTr8pFRUIQxdXa5E',
        # DashScope (Tongyi) credentials
        'DASHSCOPE_API_KEY': 'sk-3759544f2e224917bd6f6782bd7a64ba',
    }
    os.environ.update(settings)


def get_model():
    """Return a ChatOpenAI chat model instance.

    The ``model`` argument selects which OpenAI chat model variant to use.
    """
    from langchain_openai import ChatOpenAI

    return ChatOpenAI(model="gpt-3.5-turbo-0125")

def get_model_tongyi():
    """Return a Tongyi (DashScope) LLM with default settings.

    Relies on DASHSCOPE_API_KEY being present in the environment.
    """
    from langchain_community.llms.tongyi import Tongyi

    return Tongyi()

def test():
    """Smoke-test: translate a Chinese sentence to English via the Tongyi model.

    Performs network I/O; requires the API keys installed by load_env().
    """
    from langchain_core.messages import SystemMessage, HumanMessage
    from langchain_core.output_parsers import StrOutputParser

    # Load API keys and tracing settings into the environment.
    load_env()

    # Choose the model (OpenAI variant kept for reference).
    # model = get_model()
    model = get_model_tongyi()

    # SystemMessage sets the model's role/instructions;
    # HumanMessage carries the user's input.
    messages = [
        SystemMessage(content="把下面的语句翻译为英文。"),
        HumanMessage(content="今天天气怎么样？"),
    ]

    # invoke() appends "chat/completions" to the base URL for chat models.
    # Option 1: print the content attribute directly:
    # print(model.invoke(messages).content)

    # `result` is an AIMessage object.
    result = model.invoke(messages)

    print('result>>', result)

    # Option 2: the message's built-in pretty printer:
    # result.pretty_print()

    # Option 3: parse the AIMessage down to a plain string.
    parser = StrOutputParser()

    # BUG FIX: the parsed string was previously discarded, making this step a
    # no-op; print it so the parsing step has a visible effect.
    print(parser.invoke(result))


def test_prompt_template():
    """Demonstrate ChatPromptTemplate and an LCEL chain (prompt | model | parser)."""
    load_env()

    # model = get_model()
    model = get_model_tongyi()

    from langchain_core.prompts import ChatPromptTemplate
    translate_prompt = ChatPromptTemplate.from_messages([
        ('system', "把下面的话翻译为{language}"),
        ('user', '{text}'),
    ])

    # Render the template once to show the produced message list.
    rendered = translate_prompt.invoke({'language': '英文', 'text': "今天天气很差"})
    print(rendered)

    # LCEL (LangChain Expression Language): components are composed with `|`.
    from langchain_core.output_parsers import StrOutputParser
    chain = translate_prompt | model | StrOutputParser()

    print(chain.invoke({'language': '英语', 'text': '今天天气不好'}))

def test_fast_ui():
    """Serve the translation chain over HTTP with FastAPI + LangServe.

    Blocks serving requests on port 20024 until interrupted.
    """
    load_env()

    model = get_model()

    from fastapi import FastAPI
    from langserve import add_routes
    import uvicorn
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_core.output_parsers import StrOutputParser

    app = FastAPI(
        title='Test LangChain',
        version='1.0',
        description='Simple API',
    )

    translation_template = ChatPromptTemplate.from_messages([
        ('system', '把下面的话，翻译为{language}'),
        ('user', '{text}'),
    ])

    # Expose the LCEL chain under /chain (invoke/stream/batch endpoints).
    add_routes(app, translation_template | model | StrOutputParser(), path='/chain')

    uvicorn.run(app, host='0.0.0.0', port=20024)

def test_agent():
    """Build a minimal tool-calling agent and ask it a weather question."""
    from langchain.agents import create_agent

    def get_weather(city: str) -> str:
        """Get weather for a given city."""
        # Docstring above is read by the agent as the tool description.
        return f"It's always sunny in {city}!"

    weather_agent = create_agent(
        model="openai:gpt-5-mini",
        tools=[get_weather],
        system_prompt="You are a helpful assistant",
    )

    # Kick off the agent with a single user turn.
    weather_agent.invoke(
        {"messages": [{"role": "user", "content": "What is the weather in San Francisco?"}]}
    )



# BUG FIX: the demo previously ran at import time (triggering network calls
# whenever this module was imported). Guard it so it only runs as a script.
if __name__ == '__main__':
    # Select which demo to run; only one is enabled at a time.
    # test()
    test_prompt_template()
    # test_fast_ui()