SantiagoPG committed on
Commit 53bacd1
1 Parent(s): fe39015

updated app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -4,12 +4,12 @@ import torch
 
 
 # Load the model for inference
-model = AutoModelForSeq2SeqLM.from_pretrained('SantiagoPG/chatbot_customer_service')
+model1 = AutoModelForSeq2SeqLM.from_pretrained('SantiagoPG/chatbot_customer_service')
 tokenizer = AutoTokenizer.from_pretrained("Kaludi/Customer-Support-Assistant-V2")
 
 def get_chatbot_response(message):
     inputs = tokenizer.encode(message, return_tensors='pt')
-    reply_ids = model.generate(inputs)
+    reply_ids = model1.generate(inputs)
     return tokenizer.decode(reply_ids[0], skip_special_tokens=True)
 
 # Streamlit interface
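
For context, a minimal, self-contained sketch of how the updated app.py might look after this commit. Only the model/tokenizer loading and get_chatbot_response come from the diff; the imports and the Streamlit widgets below are assumptions, since the hunk starts at line 4 and stops at the "# Streamlit interface" comment.

# Hypothetical full app.py after this commit; the Streamlit layout is illustrative only.
import streamlit as st
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Load the fine-tuned seq2seq model and tokenizer (names taken from the diff)
model1 = AutoModelForSeq2SeqLM.from_pretrained('SantiagoPG/chatbot_customer_service')
tokenizer = AutoTokenizer.from_pretrained("Kaludi/Customer-Support-Assistant-V2")

def get_chatbot_response(message):
    # Tokenize the user message, generate reply token ids, decode back to text
    inputs = tokenizer.encode(message, return_tensors='pt')
    with torch.no_grad():
        reply_ids = model1.generate(inputs)
    return tokenizer.decode(reply_ids[0], skip_special_tokens=True)

# Streamlit interface (assumed layout, not shown in the diff)
st.title("Customer Support Chatbot")
user_message = st.text_input("Ask a question:")
if user_message:
    st.write(get_chatbot_response(user_message))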