m96tkmok committed
Commit 3b6ad6b · verified · 1 Parent(s): 1ad6046

Update app.py


Implement basic Chatbot

Files changed (1)

app.py +5 -20
app.py CHANGED
@@ -1,35 +1,20 @@
 import streamlit as st
 from transformers import AutoModelForCausalLM, AutoTokenizer
-from langchain_core.prompts import ChatPromptTemplate
 
 # mistralai/Mistral-Nemo-Instruct-2407
 # Load the model and tokenizer
 tokenizer = AutoTokenizer.from_pretrained("unsloth/Llama-3.2-3B-Instruct")
 model = AutoModelForCausalLM.from_pretrained("unsloth/Llama-3.2-3B-Instruct")
 
-
 st.title("Llama-3.2-3B-Instruct Text Generation")
 st.write("Enter a prompt and generate text using the Llama 3.2 3B model.")
 
-prompt = """
-You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question.
-If you don't know the answer, just say that you don't know.
-Answer in bullet points. Make sure your answer is relevant to the question and it is answered from the context only.
-Question: {question}
-Context: {context}
-Answer:
-"""
-
-prompt = ChatPromptTemplate.from_template(prompt)
-
 with st.form("llm-form"):
     user_input = st.text_area("Enter your question or statement:")
     submit = st.form_submit_button("Submit")
 
-if submit:
-
-    #outputs = model.generate(user_input, max_length=200)
-    inputs = tokenizer(user_input, return_tensors="pt")
-    outputs = model.generate(inputs["input_ids"], max_length=50)
-    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    st.write(generated_text)
+if submit:
+    inputs = tokenizer(user_input, return_tensors="pt")
+    outputs = model.generate(inputs["input_ids"], max_length=50)
+    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    st.write(generated_text)
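
As committed, the handler passes the raw text area contents straight to `generate()`, so the Instruct checkpoint never sees the chat template it was fine-tuned on, `max_length=50` counts prompt tokens against the output budget, and the full 3B model is reloaded on every Streamlit rerun. A minimal sketch of one possible follow-up, assuming the same model and form layout (the `MODEL_ID` constant and `load_model` helper are illustrative, not part of this commit):

```python
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Same checkpoint as the commit; the constant name is an assumption.
MODEL_ID = "unsloth/Llama-3.2-3B-Instruct"

@st.cache_resource  # load once per process instead of on every rerun
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
    return tokenizer, model

tokenizer, model = load_model()

st.title("Llama-3.2-3B-Instruct Text Generation")
st.write("Enter a prompt and generate text using the Llama 3.2 3B model.")

with st.form("llm-form"):
    user_input = st.text_area("Enter your question or statement:")
    submit = st.form_submit_button("Submit")

if submit:
    # Wrap the raw input in the model's chat template so the Instruct
    # variant sees the role markers it was fine-tuned on.
    messages = [{"role": "user", "content": user_input}]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    )
    with torch.no_grad():
        outputs = model.generate(input_ids, max_new_tokens=200)
    # Decode only the newly generated tokens, dropping the echoed prompt.
    generated_text = tokenizer.decode(
        outputs[0][input_ids.shape[-1]:], skip_special_tokens=True
    )
    st.write(generated_text)
```

`st.cache_resource` keeps the weights in memory across script reruns, and `max_new_tokens` budgets only the generated tokens, so short replies are no longer truncated by prompt length.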