import os

import streamlit as st
from langchain import HuggingFaceHub, LLMChain, PromptTemplate

st.title("Falcon QA Bot")

# Read the API token from the environment rather than hard-coding it in the
# source, as the original did.
huggingfacehub_api_token = os.environ.get("HUGGINGFACEHUB_API_TOKEN")

# Falcon-7B-Instruct served via the Hugging Face Inference API.
repo_id = "tiiuae/falcon-7b-instruct"
llm = HuggingFaceHub(
    huggingfacehub_api_token=huggingfacehub_api_token,
    repo_id=repo_id,
    model_kwargs={"temperature": 0.2, "max_new_tokens": 2000},
)
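
# Alternative sketch, assuming deployment on Streamlit Cloud or HF Spaces:
# the token could instead come from Streamlit's built-in secrets store
# (the secret name below is an assumption, not part of the original app).
#
#   huggingfacehub_api_token = st.secrets["HUGGINGFACEHUB_API_TOKEN"]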
template = """
You are an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
{question}
"""
# Build the prompt and chain once at import time; chat() reuses them.
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=llm, verbose=True)
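
# Optional sketch, assuming a recent Streamlit with st.cache_resource: cache
# the chain so script reruns do not rebuild it. get_chain is a hypothetical
# helper, not part of the original app.
#
#   @st.cache_resource
#   def get_chain():
#       return LLMChain(prompt=prompt, llm=llm, verbose=True)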
def chat(query):
    """Run the user's question through the chain and return the model's answer."""
    result = llm_chain.predict(question=query)
    return result
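
# Quick sanity check outside Streamlit (hypothetical question; requires a
# valid token in the environment):
#
#   print(chat("What is the Falcon-7B model?"))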
def main():
    # `input_text` avoids shadowing Python's built-in input().
    input_text = st.text_input(
        "What do you want to ask about", placeholder="Input your question here"
    )
    if input_text:
        output = chat(input_text)
        # The model may return HTML fragments; render them instead of escaping.
        st.write(output, unsafe_allow_html=True)
if __name__ == '__main__':
    main()
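
# To launch the app locally on a Unix-like shell (standard Streamlit CLI):
#
#   HUGGINGFACEHUB_API_TOKEN=<your token> streamlit run app.py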