Update app.py
app.py
CHANGED
@@ -1,20 +1,21 @@
+from langchain import PromptTemplate, HuggingFaceHub, LLMChain
 import streamlit as st
-from huggingface_hub import InferenceClient
 import os
 
-HUGGINGFACEHUB_API_TOKEN = "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"
-
-
+os.environ["HUGGINGFACEHUB_API_TOKEN"] = "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"
+template = """Question: {question}. Answer:"""
+model = "ThangDinh/qthang-finetuned"
+
+prompt = PromptTemplate(template=template, input_variables=["question"])
+llm = HuggingFaceHub(repo_id=model, model_kwargs={"temperature": 1e-10})
+llm_chain = LLMChain(prompt=prompt, llm=llm)
 
 question = st.chat_input("Write your question")
-context = st.chat_input("Write your context")
 
 if question:
     st.write("loading...")
 
-    answer =
-    answer_txt = answer["answer"]
-    st.write(f"User has sent the following prompt: {answer_txt}")
+    answer = llm_chain.run(question)
+    st.write(f"Answer: {answer}")
 
 print("done")
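For reference, a minimal runnable sketch of the resulting app.py. It assumes the installed langchain version still exposes PromptTemplate, HuggingFaceHub, and LLMChain from the top-level package (these legacy wrappers were later deprecated), and it reads the API token from the environment (e.g. a Space secret) rather than hardcoding it in the source; that last change is an assumption for the sketch, not part of the commit.

    import os

    import streamlit as st
    from langchain import PromptTemplate, HuggingFaceHub, LLMChain

    # Assumption: HUGGINGFACEHUB_API_TOKEN is provided via the environment
    # (e.g. a Space secret) instead of being committed to the repository.
    assert "HUGGINGFACEHUB_API_TOKEN" in os.environ, "set HUGGINGFACEHUB_API_TOKEN first"

    # Prompt template: the user's question is interpolated into {question}.
    template = """Question: {question}. Answer:"""
    prompt = PromptTemplate(template=template, input_variables=["question"])

    # Remote model served through the Hugging Face Inference API.
    llm = HuggingFaceHub(
        repo_id="ThangDinh/qthang-finetuned",
        model_kwargs={"temperature": 1e-10},  # near-zero temperature for near-deterministic output
    )
    llm_chain = LLMChain(prompt=prompt, llm=llm)

    question = st.chat_input("Write your question")

    if question:
        st.write("loading...")
        answer = llm_chain.run(question)  # fills the template and calls the hosted model
        st.write(f"Answer: {answer}")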