from transformers import pipeline
import ChatGLM
from langchain_community.llms import HuggingFacePipeline
import time
from langchain.prompts import PromptTemplate
# hf = HuggingFacePipeline.from_model_id(
#     model_id="THUDM/chatglm3-6b",
#     task="text-generation",
#     device=0,
#     model_kwargs={"trust_remote_code":True},
#     pipeline_kwargs={"max_new_tokens": 500,"temperature":0.9,"do_sample":True},
# )
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

# Streaming ChatGLM LLM: generated tokens are echoed to stdout as they
# arrive via the StreamingStdOutCallbackHandler.
stream_handler = StreamingStdOutCallbackHandler()
llm = ChatGLM.ChatGLM_LLM(streaming=True, callbacks=[stream_handler])

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

# LCEL pipeline: prompt template -> ChatGLM LLM -> plain-string parser.
story_prompt = ChatPromptTemplate.from_template(
    "tell me a long story about {topic}"
)
to_text = StrOutputParser()
chain = story_prompt | llm | to_text

# Kick off one generation; streaming callback prints tokens live, and the
# final parsed string is printed again here once the call completes.
print(chain.invoke({"topic": "bird"}))
# from langchain.schema import HumanMessage
# # create messages to be passed to chat LLM
# messages = [HumanMessage(content="tell me a long story")]
# print(llm.generate(messages))




# template = """{question}"""
# prompt = PromptTemplate.from_template(template)
# chain = prompt | llm

# async for chunk in chain.astream({"topic": "parrot"}):
    # print(chunk, end="|", flush=True)

    

# question = "介绍下白龙马?"
# question = "西游记有哪些人物"
# for i in range(1):
#     a = time.time()
#     print(chain.invoke({"question": question}))
#     print(time.time()-a)




