File size: 765 Bytes
c3d3e17
 
 
 
 
 
53bacd1
c3d3e17
 
 
 
53bacd1
c3d3e17
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
import streamlit as st
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import torch


# Load the model and its matching tokenizer once at import time.
# BUG FIX: the tokenizer was previously loaded from a different checkpoint
# ("Kaludi/Customer-Support-Assistant-V2") than the model. A tokenizer whose
# vocabulary does not match the model's embedding table produces wrong token
# ids and garbage generations, so both now come from the same repository.
MODEL_NAME = 'SantiagoPG/chatbot_customer_service'
model1 = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

def get_chatbot_response(message: str) -> str:
    """Generate a chatbot reply for a single user message.

    Encodes *message* with the module-level tokenizer, runs the seq2seq
    model's ``generate`` (greedy/default settings), and decodes the first
    returned sequence with special tokens stripped.

    Args:
        message: The user's input text.

    Returns:
        The decoded model reply as a plain string.
    """
    inputs = tokenizer.encode(message, return_tensors='pt')
    # Inference only: disable autograd so generate() does not build a
    # gradient graph (saves memory and time on every request).
    with torch.no_grad():
        reply_ids = model1.generate(inputs)
    # generate() returns a batch; this app sends one message, so take row 0.
    return tokenizer.decode(reply_ids[0], skip_special_tokens=True)

# --- Streamlit UI -----------------------------------------------------------
# Page title, a single-line text input, and (once the user has typed
# something non-empty) a read-only text area showing the model's reply.
st.title("Customer Service Chatbot")

# Walrus-bind the input so the truthiness check and the variable share a line;
# an empty string (nothing typed yet) skips the model call entirely.
if question := st.text_input("Type your question here:"):
    st.text_area(
        "Response",
        value=get_chatbot_response(question),
        height=100,
        max_chars=None,
        key=None,
    )