| | import gradio as gr |
| | from langchain_huggingface.llms import HuggingFacePipeline |
| | from langchain_core.prompts import PromptTemplate |
| |
|
| | |
# Local text-generation pipeline backed by GPT-2, wrapped for LangChain use.
# NOTE(review): max_new_tokens=10 keeps replies very short — confirm intended.
_pipeline_options = {"max_new_tokens": 10}
hf = HuggingFacePipeline.from_model_id(
    model_id="gpt2",
    task="text-generation",
    pipeline_kwargs=_pipeline_options,
)
| | |
# Prompt that nudges the model toward step-by-step ("chain of thought") answers.
template = (
    "Question: {question}\n"
    "\n"
    "Answer: Let's think step by step."
)
prompt = PromptTemplate.from_template(template)
# Compose prompt formatting and model inference into one runnable (LCEL `|`).
chain = prompt | hf
| | |
def respond(question, history):
    """Answer a single user question via the LLM chain.

    `history` is required by Gradio's ChatInterface signature but is
    intentionally ignored: each turn is answered statelessly.
    """
    payload = {"question": question}
    return chain.invoke(payload)
| | |
# Wire the responder into Gradio's ready-made chat UI.
chat_interface = gr.ChatInterface(
    fn=respond,
    title="Q&A Chatbot",
    description="Ask any question and get an answer!",
)
| | |
# Start the Gradio server only when this file is executed as a script.
# Guarding with __main__ fixes the defect where merely importing the module
# would block and launch the web app as a side effect.
if __name__ == "__main__":
    chat_interface.launch()