import os

from langchain.chat_models import init_chat_model
from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)


# Chat model handle shared by this module, backed by OpenRouter's
# OpenAI-compatible endpoint.
#
# SECURITY FIX: previous revisions hard-coded two OpenRouter API keys here
# (one live, one in commented-out code). Secrets must never be committed to
# source control — both exposed keys should be revoked immediately. The key
# is now read from the OPENROUTER_API_KEY environment variable; a missing
# variable fails fast with a clear error instead of silently misconfiguring.
llm = init_chat_model(
    model_provider="openai",
    model="deepseek/deepseek-chat-v3-0324:free",
    base_url="https://openrouter.ai/api/v1",
    api_key=os.environ["OPENROUTER_API_KEY"],  # KeyError if unset — intentional fail-fast
)


def invoke(system_template, user_template, input_data):
    """Render a system + user chat prompt, call the model, return its reply text.

    Args:
        system_template: Template string for the system message.
        user_template: Template string for the human (user) message.
        input_data: Mapping of template variables consumed by both templates
            (passed as keyword arguments to the prompt formatter).

    Returns:
        The assistant response content (``response.content``) as a string.
    """
    sys_msg = SystemMessagePromptTemplate.from_template(system_template)
    user_msg = HumanMessagePromptTemplate.from_template(user_template)
    prompt = ChatPromptTemplate.from_messages([sys_msg, user_msg])

    # BUG FIX: the previous `prompt.format(**input_data)` flattened the
    # conversation into one plain string, discarding the system/human role
    # structure before it reached the chat model. `format_messages` keeps the
    # role-tagged message list intact, which is what `llm.invoke` expects for
    # a chat model.
    messages = prompt.format_messages(**input_data)

    print("formatted_prompt", messages)  # debug trace of the rendered prompt

    response = llm.invoke(messages)
    return response.content
