Files changed (1) hide show
  1. app.py +32 -31
app.py CHANGED
@@ -1,33 +1,34 @@
import transformers
import torch
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification

# Load the pretrained tokenizer and classifier once at startup.
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
model = DistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased')


def preprocess_input(text):
    """Tokenize raw user text into PyTorch tensors the model accepts."""
    return tokenizer(text, return_tensors='pt')


def generate_response(user_input):
    """Run the model on the user's text and return a placeholder reply.

    The model outputs are computed but not yet interpreted; a canned
    echo response is returned until real response logic is added.
    """
    encoded = preprocess_input(user_input)
    outputs = model(**encoded)
    # TODO: extract the predicted class from `outputs` and map it to a
    # predefined response instead of echoing the input back.
    return "I'm still under development, but I understand you said: {}".format(user_input)


# Simple REPL-style chat loop; type "quit" to exit.
while True:
    line = input("Enter your input: ")
    if line.lower() == "quit":
        break
    print("Bot:", generate_response(line))
 
import streamlit as st
import torch
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification

# Title and Description
st.title("Simple DistilBERT Chatbot")
st.write("This is a basic chatbot prototype. Ask it something!")


# Load Model and Tokenizer
@st.cache_resource  # cache so the model loads once per server, not on every rerun
def load_model_tokenizer():
    """Load and cache the pretrained DistilBERT tokenizer and classifier."""
    tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
    model = DistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased')
    return tokenizer, model


tokenizer, model = load_model_tokenizer()

# User Input
user_input = st.text_input("You: ")

# Generate Response on Button Click
if st.button("Send"):
    if not user_input:
        st.warning("Please enter some text.")
    else:
        # BUG FIX: the previous version's preprocess_input() helper was
        # removed in this rewrite but was still being called here, raising
        # NameError on the first click of "Send". Tokenize inline instead.
        encoded_input = tokenizer(user_input, return_tensors='pt')
        # Inference only — disable gradient tracking to save memory/compute.
        with torch.no_grad():
            outputs = model(**encoded_input)

        # TODO: extract the predicted class from `outputs` and map it to a
        # predefined response instead of echoing the input back.
        bot_response = "I'm still under development, but I understand you said: {}".format(user_input)
        st.write("Bot: " + bot_response)