|
|
|
|
|
import os

import streamlit as st
from dotenv import load_dotenv
from langchain import HuggingFaceHub

# Pull HUGGINGFACEHUB_API_TOKEN (and any other settings) from a local .env file
# before it is read below.
load_dotenv()

huggingface_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")

# Fail fast with a clear message when the token is missing; assigning None into
# os.environ would otherwise raise an opaque TypeError on the next line.
if huggingface_token is None:
    raise EnvironmentError(
        "HUGGINGFACEHUB_API_TOKEN is not set; add it to your .env file."
    )
os.environ["HUGGINGFACEHUB_API_TOKEN"] = huggingface_token

# Flan-T5 XXL served through the HuggingFace Hub inference API.
# temperature=0.6 allows mildly varied answers; max_length caps output size.
llm_huggingface = HuggingFaceHub(
    repo_id="google/flan-t5-xxl",
    model_kwargs={"temperature": 0.6, "max_length": 64},
)
|
|
|
|
|
def get_openai_response(question):
    """Send *question* to the configured HuggingFace LLM and return its answer.

    NOTE(review): despite the name, this wraps the HuggingFace model, not
    OpenAI; the name is kept because callers below depend on it.
    """
    answer = llm_huggingface(question)
    return answer
|
|
|
|
|
|
|
# --- Page chrome -----------------------------------------------------------
st.set_page_config(page_title="Q&A Demo")
st.title("Q&A Chatbot using LangChain")

# Light custom theming for the page body.
st.markdown(
    """
    <style>
    body {
        background-color: #f0f2f6;
        color: #333333;
    }
    </style>
    """,
    unsafe_allow_html=True,
)

# --- Sidebar ---------------------------------------------------------------
st.sidebar.title("Options")
st.sidebar.info("Welcome to the Q&A Chatbot!")

# --- Main Q&A flow ---------------------------------------------------------
st.write("Ask me anything and I'll do my best to answer!")

# Renamed from `input` to avoid shadowing the builtin; the widget key stays
# "input" so existing session state remains compatible.
user_question = st.text_input("Question: ", key="input", value=" ")

submit = st.button("Ask the question")

# Only query the LLM after the user presses the button. The previous code
# called get_openai_response() unconditionally on every Streamlit rerun --
# including the very first render with the blank default value -- wasting an
# API call and adding latency each time any widget changed.
if submit:
    response = get_openai_response(user_question)
    st.subheader("The Answer is")
    st.write(response)
|
|
|
|
|
|
|
|