from langchain_community.llms import HuggingFacePipeline
from langchain.prompts import PromptTemplate
import gradio as gr

# Chain setup: a pass-through prompt piped into a local ChatGLM3-6B
# text-generation pipeline running on the first CUDA device.
#
# SECURITY NOTE(review): trust_remote_code=True executes Python shipped in
# the model repo on the Hugging Face Hub — acceptable only for trusted
# model sources. Flagged rather than removed because ChatGLM3 requires it.
hf = HuggingFacePipeline.from_model_id(
    model_id="THUDM/chatglm3-6b",
    task="text-generation",
    device=0,  # GPU index 0; use -1 to run on CPU
    model_kwargs={"trust_remote_code": True},
    pipeline_kwargs={"max_new_tokens": 5000},
)

# The template adds no framing around the user's question; the chain is
# simply prompt -> model.
template = "{question}"
prompt = PromptTemplate.from_template(template)
chain = prompt | hf

def greet2(name):
    """Send *name* (the user's question) through the prompt|model chain.

    Returns the raw text produced by the pipeline.
    """
    return chain.invoke({"question": name})

def alternatingly_agree(message, history):
    """Gradio ChatInterface callback.

    Answers *message* via greet2; *history* (the prior conversation turns
    supplied by Gradio) is ignored — every turn is answered statelessly.
    """
    return greet2(message)

if __name__ == "__main__":
    # Guard the launch so importing this module doesn't start a web server.
    # server_name="0.0.0.0" binds on all network interfaces, making the UI
    # reachable from other machines — intentional for container/remote use.
    gr.ChatInterface(alternatingly_agree).launch(server_name="0.0.0.0")

