# test/app.py
import torch
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification

# Load the DistilBERT tokenizer and sequence-classification model.
# Note: 'distilbert-base-uncased' ships without a fine-tuned classification head,
# so its class predictions are placeholders until a task-specific checkpoint is used.
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
model = DistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased')
model.eval()  # inference only; disable dropout
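
# Sketch of an alternative (assumption, not part of the original script): a checkpoint
# fine-tuned for classification, such as the public SST-2 sentiment model below, would
# give meaningful predictions instead of the placeholder ones from the untrained head.
# tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased-finetuned-sst-2-english')
# model = DistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased-finetuned-sst-2-english')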

# Preprocess user input: tokenize the text into PyTorch tensors.
def preprocess_input(text):
    encoded_input = tokenizer(text, return_tensors='pt')
    return encoded_input
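
# For reference (standard transformers behavior): with return_tensors='pt' the tokenizer
# returns a dict-like encoding containing 'input_ids' and 'attention_mask' tensors, which
# is why it can be unpacked directly into the model call with **encoded_input.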

# Generate a response based on user input.
def generate_response(user_input):
    encoded_input = preprocess_input(user_input)
    with torch.no_grad():  # inference only; no gradients needed
        outputs = model(**encoded_input)
    # Extract relevant information from the model outputs (here, the predicted class).
    predicted_class = torch.argmax(outputs.logits, dim=-1).item()
    # Formulate a response; predicted_class is available for predefined responses or logic,
    # but the placeholder reply below does not use it yet.
    response = "I'm still under development, but I understand you said: {}".format(user_input)
    return response
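
# Sketch (assumes a fine-tuned checkpoint whose config defines id2label, e.g. the SST-2
# model noted above): inside generate_response, the predicted class could drive a
# predefined reply, for example:
# label = model.config.id2label[predicted_class]  # e.g. 'POSITIVE' / 'NEGATIVE'
# response = "That sounds {} to me. You said: {}".format(label.lower(), user_input)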

# Start the chat loop; type "quit" to exit.
while True:
    user_input = input("You: ")
    if user_input == "quit":
        break
    bot_response = generate_response(user_input)
    print("Bot:", bot_response)