MatheusHRV committed on
Commit
2919744
·
verified ·
1 Parent(s): 9afa870

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -8
app.py CHANGED
@@ -1,5 +1,5 @@
1
  import streamlit as st
2
- from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
  from langchain.schema import AIMessage, HumanMessage, SystemMessage
4
 
5
  st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")
@@ -10,17 +10,17 @@ if "sessionMessages" not in st.session_state:
10
  SystemMessage(content="You are a helpful customer support chatbot for a website.")
11
  ]
12
 
13
- # Load tokenizer and model
14
- model_name = "tiiuae/Falcon3-1B-Instruct"
15
  tokenizer = AutoTokenizer.from_pretrained(model_name)
16
- model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype="auto")
17
 
18
- # Create a text-generation pipeline
19
- generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1, max_new_tokens=256, temperature=0.3)
20
 
21
  def load_answer(question):
22
  st.session_state.sessionMessages.append(HumanMessage(content=question))
23
 
 
24
  prompt = ""
25
  for msg in st.session_state.sessionMessages:
26
  if isinstance(msg, SystemMessage):
@@ -30,8 +30,9 @@ def load_answer(question):
30
  elif isinstance(msg, AIMessage):
31
  prompt += f"AI: {msg.content}\n"
32
 
33
- output = generator(prompt, max_new_tokens=256, do_sample=True, temperature=0.3)
34
- answer_text = output[0]["generated_text"][len(prompt):].strip()
 
35
 
36
  st.session_state.sessionMessages.append(AIMessage(content=answer_text))
37
  return answer_text
 
1
  import streamlit as st
2
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
3
  from langchain.schema import AIMessage, HumanMessage, SystemMessage
4
 
5
  st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")
 
10
  SystemMessage(content="You are a helpful customer support chatbot for a website.")
11
  ]
12
 
13
+ # Load Flan-T5-Small (CPU-friendly)
14
+ model_name = "google/flan-t5-small"
15
  tokenizer = AutoTokenizer.from_pretrained(model_name)
16
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
17
 
18
+ generator = pipeline("text2text-generation", model=model, tokenizer=tokenizer, device=-1, max_new_tokens=256)
 
19
 
20
  def load_answer(question):
21
  st.session_state.sessionMessages.append(HumanMessage(content=question))
22
 
23
+ # Concatenate messages into a single prompt
24
  prompt = ""
25
  for msg in st.session_state.sessionMessages:
26
  if isinstance(msg, SystemMessage):
 
30
  elif isinstance(msg, AIMessage):
31
  prompt += f"AI: {msg.content}\n"
32
 
33
+ # Generate response
34
+ output = generator(prompt)
35
+ answer_text = output[0]["generated_text"].strip()
36
 
37
  st.session_state.sessionMessages.append(AIMessage(content=answer_text))
38
  return answer_text