MatheusHRV committed · Commit eb1d61e · verified · 1 Parent(s): ac7d0c9

Update app.py

Files changed (1):
  1. app.py +36 -34
app.py CHANGED
@@ -1,6 +1,5 @@
 import streamlit as st
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
-from langchain.schema import AIMessage, HumanMessage, SystemMessage
 
 # ------------------------
 # Streamlit UI
@@ -11,26 +10,17 @@ st.header("MHRV Chatbot")
 # ------------------------
 # Session memory
 # ------------------------
-if "sessionMessages" not in st.session_state:
-    st.session_state.sessionMessages = [
-        SystemMessage(
-            content=(
-                "You are a highly intelligent and helpful customer support assistant. "
-                "Answer user questions clearly, politely, and professionally. "
-                "If you don’t know the answer, say so instead of making things up. "
-                "Provide step-by-step instructions if relevant and helpful."
-            )
-        )
-    ]
+if "conversation" not in st.session_state:
+    st.session_state.conversation = []
 
 # ------------------------
 # Load model and tokenizer
 # ------------------------
-model_name = "bigscience/bloom-560m"  # CPU-compatible
+model_name = "bigscience/bloom-560m"  # CPU-friendly
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
-# Create text-generation pipeline
+# Text-generation pipeline
 generator = pipeline(
     "text-generation",
     model=model,
@@ -43,36 +33,48 @@ generator = pipeline(
 # ------------------------
 # Helper functions
 # ------------------------
-def load_answer(question):
-    st.session_state.sessionMessages.append(HumanMessage(content=question))
-
-    # Build prompt from session messages
-    prompt = ""
-    for msg in st.session_state.sessionMessages:
-        if isinstance(msg, SystemMessage):
-            prompt += f"System: {msg.content}\n"
-        elif isinstance(msg, HumanMessage):
-            prompt += f"Human: {msg.content}\n"
-        elif isinstance(msg, AIMessage):
-            prompt += f"AI: {msg.content}\n"
-
-    # Generate answer
-    output = generator(prompt, max_new_tokens=256, do_sample=True, temperature=0.3)
-    answer_text = output[0]["generated_text"][len(prompt):].strip()
-
-    st.session_state.sessionMessages.append(AIMessage(content=answer_text))
-    return answer_text
-
-def get_text():
-    return st.text_input("You: ", key="input")
+def load_answer(user_input):
+    # Add user input to session
+    st.session_state.conversation.append({"role": "user", "content": user_input})
+
+    # Build prompt for BLOOM
+    system_instruction = (
+        "You are a helpful, professional customer support assistant. "
+        "Answer questions clearly, politely, and accurately. "
+        "If the question is mathematical or factual, provide the correct answer. "
+        "Do not repeat the user's message."
+    )
+
+    # Use last 3 messages (or fewer if new) to maintain context
+    prompt = system_instruction + "\n"
+    for msg in st.session_state.conversation[-3:]:
+        if msg["role"] == "user":
+            prompt += f"User: {msg['content']}\n"
+        elif msg["role"] == "assistant":
+            prompt += f"Assistant: {msg['content']}\n"
+    prompt += "Assistant:"
+
+    # Generate answer
+    output = generator(prompt, max_new_tokens=128, do_sample=False)
+    answer = output[0]["generated_text"][len(prompt):].strip()
+
+    # Save answer in session
+    st.session_state.conversation.append({"role": "assistant", "content": answer})
+    return answer
 
 # ------------------------
-# Main app
+# Streamlit input
 # ------------------------
-user_input = get_text()
+user_input = st.text_input("You: ", key="input")
 submit = st.button("Generate")
 
 if submit and user_input:
     response = load_answer(user_input)
     st.subheader("Answer:")
     st.write(response)
+
+# Optional: show conversation history
+if st.checkbox("Show conversation history"):
+    for msg in st.session_state.conversation:
+        role = "You" if msg["role"] == "user" else "Bot"
+        st.write(f"**{role}:** {msg['content']}")
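For reference, the heart of this change is swapping LangChain message objects for plain role/content dicts plus a three-message sliding window. Below is a minimal standalone sketch of the prompt string the new load_answer() assembles; the sample history is hypothetical, purely for illustration, and no Streamlit or model is needed to run it.

# Sketch of the prompt format built by the updated load_answer().
# The conversation history below is a made-up example.

SYSTEM_INSTRUCTION = (
    "You are a helpful, professional customer support assistant. "
    "Answer questions clearly, politely, and accurately. "
    "If the question is mathematical or factual, provide the correct answer. "
    "Do not repeat the user's message."
)

def build_prompt(conversation):
    # Mirror the diff: system text, then the last 3 turns, then a bare
    # "Assistant:" cue that the model is expected to continue from.
    prompt = SYSTEM_INSTRUCTION + "\n"
    for msg in conversation[-3:]:
        if msg["role"] == "user":
            prompt += f"User: {msg['content']}\n"
        elif msg["role"] == "assistant":
            prompt += f"Assistant: {msg['content']}\n"
    return prompt + "Assistant:"

if __name__ == "__main__":
    history = [
        {"role": "user", "content": "How do I reset my password?"},
        {"role": "assistant", "content": "Open Settings and choose Reset Password."},
        {"role": "user", "content": "And if I forgot my email?"},
    ]
    print(build_prompt(history))

The [-3:] slice bounds prompt length, which matters for a small CPU-bound model like bloom-560m, and load_answer() later strips the echoed prompt from the generation with output[0]["generated_text"][len(prompt):].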