# MistralQnA / app.py
import os

import streamlit as st
from langchain import HuggingFaceHub, PromptTemplate, LLMChain

st.title("Falcon QA Bot")

# Read the Hugging Face Hub token from the environment instead of
# hard-coding a secret in the source file.
huggingfacehub_api_token = os.environ.get("HUGGINGFACEHUB_API_TOKEN")

# Falcon-7B-Instruct served through the Hugging Face Inference API.
repo_id = "tiiuae/falcon-7b-instruct"
llm = HuggingFaceHub(
    huggingfacehub_api_token=huggingfacehub_api_token,
    repo_id=repo_id,
    model_kwargs={"temperature": 0.2, "max_new_tokens": 2000},
)
template = """
You are an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
{question}
"""
# Build the prompt -> LLM chain once at module load; it is reused for every query.
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=llm, verbose=True)
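# A minimal sketch of an optional improvement (assumes Streamlit >= 1.18,
# which provides st.cache_resource; get_chain is a hypothetical helper):
# Streamlit reruns this entire script on every user interaction, so the
# chain could instead be built once and cached across reruns:
#
# @st.cache_resource
# def get_chain():
#     prompt = PromptTemplate(template=template, input_variables=["question"])
#     return LLMChain(prompt=prompt, llm=llm, verbose=True)
#
# llm_chain = get_chain()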
def chat(query):
    """Run the user's question through the chain and return the model's answer."""
    return llm_chain.predict(question=query)
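# Example usage (hypothetical question):
#   chat("What is the Falcon family of language models?")
# returns the generated answer as a plain string.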
def main():
    question = st.text_input(
        "What do you want to ask about", placeholder="Input your question here"
    )
    if question:
        # Render the answer as plain text; unsafe_allow_html is avoided so
        # the model's output cannot inject markup into the page.
        st.write(chat(question))
if __name__ == '__main__':
    main()
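# To run this app locally (a sketch; assumes streamlit and langchain are
# installed, and that you supply your own Hugging Face token):
#   export HUGGINGFACEHUB_API_TOKEN="hf_..."  # placeholder for a real token
#   streamlit run app.py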