from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, PromptTemplate, \
    HumanMessagePromptTemplate
from langchain_openai import ChatOpenAI
from langchain_core.callbacks import StreamingStdOutCallbackHandler
import os

# model = ChatOpenAI(model="Qwen", base_url="http://10.245.130.1/backend_ai_new/v1/", temperature='0.01',
#                    openai_api_key='EMPTY', callbacks=[StreamingStdOutCallbackHandler()])

# model = ChatOpenAI(model="qwen2.5:32b", base_url="http://js1.blockelite.cn:31670/v1/", temperature='0.01',
#                    openai_api_key='EMPTY', callbacks=[StreamingStdOutCallbackHandler()])

# Shared chat model used by every chain in this module.
# NOTE: temperature must be a float — the original passed the string '0.01',
# which relies on implicit coercion and breaks strict validation.
# The API key is a placeholder; the self-hosted endpoint ignores it.
model = ChatOpenAI(model="qwen2.5vl:32b", base_url="http://js1.blockelite.cn:31670/v1/", temperature=0.01,
                   openai_api_key='none', callbacks=[StreamingStdOutCallbackHandler()])

def get_llm_chain(tool_type):
    """Build a prompt | model chain whose system prompt is loaded from disk.

    The system prompt text is read from ``<cwd>/prompt/<tool_type>.txt``,
    so the working directory must be the project root when this is called.

    Args:
        tool_type: Base name (without extension) of the prompt file to load.

    Returns:
        A runnable ``prompt | model`` chain expecting ``input`` (and
        optionally ``chat_history``/``agent_scratchpad`` message lists).

    Raises:
        FileNotFoundError: If the prompt file does not exist.
    """
    # os.path.join instead of string concatenation: portable and avoids
    # double/missing separator bugs.
    path = os.path.join(os.getcwd(), "prompt", f"{tool_type}.txt")
    with open(path, 'r', encoding='utf-8') as file:
        content = file.read()

    prompt = ChatPromptTemplate.from_messages(
        [
            # Prior conversation turns; optional so the chain also works
            # on the first turn with no history.
            MessagesPlaceholder(variable_name='chat_history', optional=True),
            # System prompt comes verbatim from the file; any {placeholders}
            # in the file become template variables.
            SystemMessagePromptTemplate(
                prompt=PromptTemplate(template=content)
            ),
            HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['input'], template='{input}')),
            # Required slot for agent intermediate steps (tool calls/results).
            MessagesPlaceholder(variable_name='agent_scratchpad'),
        ]
    )
    chain = prompt | model
    return chain

def get_llm():
    """Return the module-level shared chat model instance."""
    llm = model
    return llm