# Use a pipeline as a high-level helper
from transformers import pipeline
import time
import config
from config.model_config import qwen_model_path

# Load the HF text-generation pipeline and report how long loading took.
# max_new_tokens=512 caps the length of each generated continuation.
start_time = time.time()
pipe = pipeline("text-generation", model=qwen_model_path, max_new_tokens=512)
load_pipeline_time = time.time()
print('load pipeline time', (load_pipeline_time - start_time))


# Wrap the pipeline loaded above in a LangChain LCEL chain and run one Q&A.
# NOTE(review): top-level `from langchain import ...` re-exports are deprecated
# in recent LangChain releases; `langchain_core.prompts.PromptTemplate` and
# `langchain_huggingface.HuggingFacePipeline` are the current homes — confirm
# against the pinned langchain version before switching.
from langchain import PromptTemplate, HuggingFacePipeline

# Prompt nudges the model to reason step by step ("分步思考如下" ≈
# "step-by-step reasoning follows") before answering.
qa_prompt = PromptTemplate.from_template(
    """Question: {question}\nAnswer: 分步思考如下："""
)
qa_chain = qa_prompt | HuggingFacePipeline(pipeline=pipe)

question = "如何应对气候变化？"
answer = qa_chain.invoke({"question": question})

# Echo the question alongside the model's answer.
print("问题：", question)
print("回答：", answer)
