MatheusHRV committed on
Commit
9afa870
·
verified ·
1 Parent(s): 2c5b3ca

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -10
app.py CHANGED
@@ -1,23 +1,22 @@
1
"""Streamlit chat front-end backed by a local CPU text-generation pipeline."""
import streamlit as st
from transformers import pipeline
from langchain.schema import AIMessage, HumanMessage, SystemMessage

# Page chrome must be set before any other Streamlit rendering call.
st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")
st.header("Pequeno Chatbot - Matheus")

# Seed the conversation history exactly once per browser session.
if "sessionMessages" not in st.session_state:
    st.session_state.sessionMessages = [
        SystemMessage(content="You are a helpful customer support chatbot for a website.")
    ]

# Text-generation pipeline for the Falcon3 instruct model.
_generation_opts = dict(
    model="tiiuae/Falcon3-1B-Instruct",
    device=-1,  # -1 pins inference to the CPU
    max_new_tokens=256,
    temperature=0.3,
)
generator = pipeline("text-generation", **_generation_opts)
21
 
22
  def load_answer(question):
23
  st.session_state.sessionMessages.append(HumanMessage(content=question))
 
1
"""Streamlit chat front-end backed by a locally loaded Falcon3 instruct model."""
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from langchain.schema import AIMessage, HumanMessage, SystemMessage

# Page chrome must be set before any other Streamlit rendering call.
st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")
st.header("MHRV Chatbot")

# Seed the conversation history exactly once per browser session.
if "sessionMessages" not in st.session_state:
    st.session_state.sessionMessages = [
        SystemMessage(content="You are a helpful customer support chatbot for a website.")
    ]

# Load tokenizer and model. device_map="auto" hands device placement to
# accelerate, so the pipeline below must NOT also receive a `device`
# argument — passing both makes transformers raise a ValueError
# ("model has been loaded with accelerate and therefore cannot be moved
# to a specific device ... discard the `device` argument").
# NOTE(review): Streamlit re-runs this script on every interaction, so the
# model is reloaded each time; consider wrapping the load in
# @st.cache_resource — left as-is here to keep the module-level names
# (model_name, tokenizer, model) available to the rest of the file.
model_name = "tiiuae/Falcon3-1B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype="auto")

# Create a text-generation pipeline on the already-placed model.
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=256,
    # NOTE(review): temperature only takes effect with do_sample=True;
    # as written, generation is greedy — confirm which was intended.
    temperature=0.3,
)
 
20
 
21
  def load_answer(question):
22
  st.session_state.sessionMessages.append(HumanMessage(content=question))