# Please install OpenAI SDK first: `pip3 install openai`
from openai import OpenAI
import os
from dotenv import load_dotenv, find_dotenv
from langchain_deepseek import ChatDeepSeek
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

def normal_deepseek(api_key):
    """Send a fixed "Hello" chat request to DeepSeek via the raw OpenAI SDK.

    Args:
        api_key: DeepSeek API key.

    Returns:
        The assistant's reply text from the first completion choice.
    """
    deepseek_client = OpenAI(api_key=api_key, base_url="https://api.deepseek.com")

    completion = deepseek_client.chat.completions.create(
        model="deepseek-chat",
        messages=[
            {"role": "system", "content": "You are a helpful assistant"},
            {"role": "user", "content": "Hello"},
        ],
        stream=False,
    )
    return completion.choices[0].message.content

def langchain_deepseek(api_key, model='deepseek-chat'):
    """Build and return a LangChain ChatDeepSeek client.

    Args:
        api_key: DeepSeek API key.
        model: DeepSeek model name (defaults to 'deepseek-chat').

    Returns:
        A configured ChatDeepSeek instance (temperature=0 for deterministic
        output, up to 2 automatic retries).
    """
    llm = ChatDeepSeek(
        model=model,
        temperature=0,
        max_tokens=None,  # no explicit cap; defer to the API default
        timeout=None,     # no client-side timeout
        max_retries=2,
        api_key=api_key,
    )
    return llm

def get_prompt_from_langchain(sys_template):
    """Build a two-message chat prompt: a system template plus one human slot.

    Args:
        sys_template: System-message template string (may contain
            placeholders such as {input_language}).

    Returns:
        A ChatPromptTemplate whose human message is the "{input}" variable.
    """
    return ChatPromptTemplate.from_messages(
        [
            ("system", sys_template),
            ("human", "{input}"),
        ]
    )

def get_OutputParser_from_langchain():
    """Return a StrOutputParser that extracts plain text from model output."""
    parser = StrOutputParser()
    return parser

def get_llm():
    """Build a ChatDeepSeek client from the DPSEEK_API_KEY environment variable.

    Raises:
        KeyError: If DPSEEK_API_KEY is not set in the environment.

    Returns:
        A configured ChatDeepSeek instance.
    """
    return langchain_deepseek(api_key=os.environ['DPSEEK_API_KEY'])

if __name__ == '__main__':
    # Load DPSEEK_API_KEY (and any other settings) from a .env file.
    _ = load_dotenv(find_dotenv())
    api_key = os.environ['DPSEEK_API_KEY']
    model_version = "deepseek-chat"
    text = "我带着比身体重的行李，\
    游入尼罗河底，\
    经过几道闪电 看到一堆光圈，\
    不确定是不是这里。\
    "
    sys_template = "你是一个翻译助手，可以帮助我将 {input_language} 翻译成 {output_language}."
    chat_prompt = get_prompt_from_langchain(sys_template)

    my_llm = langchain_deepseek(api_key, model_version)
    # BUG FIX: ChatDeepSeek.invoke() accepts a string, a PromptValue, or a
    # sequence of message-like objects — NOT a bare {"messages": [...]} dict.
    # Pass the message list directly.
    res = my_llm.invoke([
        {"role": "user", "content": "病人病历是：" + "无病史"},
        {"role": "user", "content": "病人总结病史：" + "病人没有总结病史。"},
    ])
    print(res)

    # Example chain usage (note: the prompt's human slot is "{input}",
    # so the text must be passed under the "input" key):
    # output_parser = get_OutputParser_from_langchain()
    # chain = chat_prompt | my_llm | output_parser
    # res = chain.invoke({"input_language": "中文", "output_language": "英文", "input": text})
    # print(res)