def load_env():
    """Populate the process environment with LangSmith / OpenAI-proxy / DashScope settings.

    Mutates ``os.environ`` in place; call once before building any model or chain.

    SECURITY NOTE(review): API keys are hard-coded here. They should be rotated
    and loaded from a secrets store or a .env file instead of living in source.
    """
    import os

    os.environ.update({
        # LangSmith tracing configuration
        'LANGSMITH_TRACING_V2': 'true',
        'LANGSMITH_ENDPOINT': 'https://api.smith.langchain.com',
        'LANGSMITH_API_KEY': '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',
        'LANGSMITH_PROJECT': 'pr-potable-worth-13',
        # OpenAI-compatible proxy endpoint
        'OPENAI_BASE_URL': 'http://jeniya.top/v1',
        'OPENAI_API_KEY': 'sk-0cNrlS1Wb6wZKol9e3l6NLMcmyKSpjdkoTr8pFRUIQxdXa5E',
        # DashScope (Tongyi) credentials
        'DASHSCOPE_API_KEY': 'sk-3759544f2e224917bd6f6782bd7a64ba',
    })

def get_model_tongyi():
    """Return a fresh Tongyi LLM instance (reads DASHSCOPE_API_KEY from the environment)."""
    from langchain_community.llms.tongyi import Tongyi

    return Tongyi()

def test_linear_graph():
    """Demo of a strictly linear workflow: START -> process_input -> finalize_output -> END."""
    from typing import TypedDict

    from langgraph.graph import START, END, StateGraph

    class State(TypedDict):
        input: str
        output: str

    def process_input(state: State):
        # Copy the input into the output slot.
        state['output'] = state['input']
        return state

    def finalize_output(state: State):
        # Append a completion marker (original marker text kept verbatim).
        state['output'] += ' >>finied'
        return state

    builder = StateGraph(State)
    builder.add_node('process_input', process_input)
    builder.add_node('finalize_output', finalize_output)

    # Wire the chain in execution order.
    builder.add_edge(START, 'process_input')
    builder.add_edge('process_input', 'finalize_output')
    builder.add_edge('finalize_output', END)

    compiled = builder.compile()
    final_state = compiled.invoke({'input': '今天天气不错', 'output': ''})

    print(final_state)

def test_conditional_graph():
    """Demo of conditional branching:

    input -> check -> process -> End      (input longer than 7 chars)
                   -> skip    -> End      (otherwise)
    """
    from typing import TypedDict

    from langgraph.graph import StateGraph, START, END

    class State(TypedDict):
        input: str
        output: str
        need_process: bool

    def check_input(state: State):
        # Flag inputs longer than 7 characters for processing.
        state['need_process'] = len(state['input']) > 7
        return state

    def process(state: State):
        state['output'] = state['input']
        return state

    def skip(state: State):
        state['output'] = '长度太短无需处理'
        return state

    def route(state: State):
        # Routing function: pick the next node name from the checked flag.
        return 'process' if state['need_process'] else 'skip'

    builder = StateGraph(State)
    for name, fn in (('check', check_input), ('process', process), ('skip', skip)):
        builder.add_node(name, fn)

    builder.add_edge(START, 'check')
    builder.add_conditional_edges('check', route, {'process': 'process', 'skip': 'skip'})
    builder.add_edge('process', END)
    builder.add_edge('skip', END)

    compiled = builder.compile()
    final_state = compiled.invoke({'input': '你好', 'output': '', 'need_process': False})

    print(final_state)

def test_cycle_graph():
    """Demo of a cyclic workflow: 'process' loops back onto itself until count reaches 3.

    START -> process -> (process | END)
    """
    from typing import TypedDict

    from langgraph.graph import StateGraph, START, END

    class State(TypedDict):
        count: int
        output: str

    def process(state: State):
        # Increment the counter and record it in the output text.
        state['count'] += 1
        state['output'] = f"当前计数：{state['count']}"
        return state

    def check_continue(state: State):
        # Loop back while the counter is below 3, otherwise terminate.
        return 'process' if state['count'] < 3 else END

    builder = StateGraph(State)
    builder.add_node('process', process)
    builder.add_edge(START, 'process')
    builder.add_conditional_edges('process', check_continue, {'process': 'process', END: END})

    compiled = builder.compile()

    # NOTE: 'output' is intentionally omitted from the initial state; the first
    # 'process' step fills it in.
    final_state = compiled.invoke({'count': 0})

    print(final_state)

def test_chain_graph():
    """Demo of an LLM-routed workflow: a Tongyi model decides whether a query
    needs a (mock) web search, then the graph routes to the search tool or to
    a direct answer.

    START -> check_search -> process (search) -> END
                          -> direct_answer    -> END
    """
    from typing import TypedDict

    from langchain_core.tools import tool

    @tool
    def search(query: str) -> str:  # a docstring is required inside a @tool function body
        '''模拟搜索工具'''
        return '检索 for %s，结果如下' % query

    class State(TypedDict):
        query: str
        output: str
        need_search: bool

    load_env()
    model = get_model_tongyi()

    def check_search(state: State):
        """Ask the model whether the query requires a web search; set need_search."""
        # BUG FIX: the prompt previously offered the options "`yes` or `or`";
        # the second option is corrected to `no` so the model can answer sensibly.
        prompt = '对于这个问题 `{query}` 是否需要进行网络搜索，只需回答 `yes` or `no`'
        from langchain_core.prompts import ChatPromptTemplate
        from langchain_core.output_parsers import StrOutputParser

        prompt_template = ChatPromptTemplate.from_messages([
            ('user', prompt)
        ])
        chain = prompt_template | model | StrOutputParser()

        result = chain.invoke({'query': state['query']})
        print(result)

        # Robustness: LLM replies often carry whitespace or capitalisation;
        # normalise before comparing instead of requiring an exact 'yes'.
        state['need_search'] = result.strip().lower() == 'yes'
        return state

    def process(state: State):
        # Run the mock search tool over the query.
        state['output'] = search.invoke(state['query'])
        return state

    def direct_answer(state: State):
        state['output'] = '无需检索，直接输出'
        return state

    def answer(state: State):
        # Routing function keyed off the flag set by check_search.
        return 'process' if state['need_search'] else 'direct'

    from langgraph.graph import START, END, StateGraph

    workflow = StateGraph(State)
    workflow.add_node('check_search', check_search)
    workflow.add_node('process', process)
    workflow.add_node('direct_answer', direct_answer)

    workflow.add_edge(START, 'check_search')
    workflow.add_conditional_edges('check_search', answer, {
        'process': 'process',
        'direct': 'direct_answer',
    })
    workflow.add_edge('process', END)
    workflow.add_edge('direct_answer', END)

    graph = workflow.compile()

    result = graph.invoke({'query': '今天天气如何', 'output': '', 'need_search': False})

    print(result)

# Manual smoke tests — uncomment the one you want to run.
# test_linear_graph()
# test_conditional_graph()
# test_cycle_graph()

# Guarded so importing this module does not trigger network calls to the LLM.
if __name__ == '__main__':
    test_chain_graph()