abaliyan committed on
Commit
3c86eaf
·
verified ·
1 Parent(s): 74d091b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -5
app.py CHANGED
@@ -1,15 +1,41 @@
1
  import streamlit as st
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
- st.title("custom chatbot with HF model")
4
 
5
 
6
  def get_response(input_text):
7
 
8
- response = f"this is response for {input_text}"
 
 
 
 
 
 
 
9
 
10
  return response
11
 
12
- user_input = st.text_input("Enter your query here..")
 
13
  if st.button("Get Response") and user_input:
14
- answer = get_response(user_input)
15
- st.write(answer)
 
 
 
 
import streamlit as st
# NOTE(review): the original had two import lines that both pulled in
# AutoTokenizer; collapsed to one.  AutoModelForCausalLM is never used
# below, but it is kept rather than deleting a file-level import.
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoModelForSeq2SeqLM

st.title("Generating Response with HuggingFace Models")
st.markdown("## Model: `facebook/blenderbot-400M-distill`")


@st.cache_resource
def _load_model(model_name="facebook/blenderbot-400M-distill"):
    """Load and cache the BlenderBot seq2seq model and its tokenizer.

    Streamlit re-executes the whole script on every interaction, so the
    expensive load must be cached per process.  The original guarded the
    load with ``if "model" not in os.environ`` but then set
    ``os.environ["MODEL"]`` — a case mismatch, so the guard never fired
    on case-sensitive platforms — and it only stored the string "1",
    never the model objects, so the model was reloaded on every rerun
    regardless.  ``st.cache_resource`` is the supported mechanism for
    holding one copy of a heavy resource across reruns.

    Returns:
        tuple: ``(model, tokenizer)`` for the given checkpoint.
    """
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return model, tokenizer


# Module-level names preserved: get_response() below reads these globals.
with st.spinner("Getting this ready for you.."):
    model, tokenizer = _load_model()
18
 
 
19
 
20
 
21
def get_response(input_text):
    """Generate a chatbot reply for *input_text* with the module-level
    BlenderBot model and tokenizer.

    Args:
        input_text (str): The user's query.

    Returns:
        str: The decoded model output, special tokens stripped and
        surrounding whitespace removed.
    """
    # Calling the tokenizer directly is the documented replacement for
    # the deprecated encode_plus(); both yield a dict of tensors that
    # can be splatted into generate().
    inputs = tokenizer(input_text, return_tensors="pt")

    # Generation with the model's default settings (greedy/beam per config).
    outputs = model.generate(**inputs)

    # outputs[0] is the single generated sequence (batch size is 1 here).
    response = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()

    return response
33
 
34
user_input = st.text_area("Enter your query here...")

# st.button must always run so the button is drawn; only gate the model
# call on the (pressed AND non-empty input) combination afterwards.
pressed = st.button("Get Response")
if pressed and user_input:
    with st.spinner("Generating Response..."):
        answer = get_response(user_input)
    # get_response always returns a str, so this check never fails;
    # kept to mirror the original flow.  (NOTE(review): the diff view
    # lost the original indentation — the post-spinner placement of the
    # success/write pair is the natural reading; confirm against the app.)
    if answer is not None:
        st.success('Great! Response generated successfully')
        st.write(answer)