Update app.py
app.py CHANGED
@@ -1,5 +1,5 @@
 import streamlit as st
-from transformers import AutoTokenizer,
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
 from langchain.schema import AIMessage, HumanMessage, SystemMessage
 
 st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")
@@ -10,17 +10,17 @@ if "sessionMessages" not in st.session_state:
         SystemMessage(content="You are a helpful customer support chatbot for a website.")
     ]
 
-# Load
-model_name = "
+# Load Flan-T5-Small (CPU-friendly)
+model_name = "google/flan-t5-small"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-model =
+model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 
-
-generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1, max_new_tokens=256, temperature=0.3)
+generator = pipeline("text2text-generation", model=model, tokenizer=tokenizer, device=-1, max_new_tokens=256)
 
 def load_answer(question):
     st.session_state.sessionMessages.append(HumanMessage(content=question))
 
+    # Concatenate messages into a single prompt
     prompt = ""
     for msg in st.session_state.sessionMessages:
         if isinstance(msg, SystemMessage):
@@ -30,8 +30,9 @@ def load_answer(question):
         elif isinstance(msg, AIMessage):
             prompt += f"AI: {msg.content}\n"
 
-
-
+    # Generate response
+    output = generator(prompt)
+    answer_text = output[0]["generated_text"].strip()
 
     st.session_state.sessionMessages.append(AIMessage(content=answer_text))
     return answer_text
|