ShAnSantosh committed on
Commit 85a00f5
1 Parent(s): 3041db0

Update app.py

Files changed (1)
  1. app.py +4 -25
app.py CHANGED
@@ -1,10 +1,3 @@
-#from transformers import AutoModelForCausalLM, AutoTokenizer
-import torch
-"""
-tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
-model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
-"""
-
 import random
 import json
 
@@ -14,6 +7,7 @@ from model import NeuralNet
 from nltk_utils import bag_of_words, tokenize
 
 device = torch.device("cpu")
+
 with open('./intents.json', 'r') as json_data:
     intents = json.load(json_data)
 
@@ -31,25 +25,10 @@ model = NeuralNet(input_size, hidden_size, output_size).to(device)
 model.load_state_dict(model_state)
 model.eval()
 
-def predict(sentence, history):
+def predict(message, history):
     history = history or []
-    """
-    # tokenize the new input sentence
-    new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')
-
-    # append the new user input tokens to the chat history
-    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
-
-    # generate a response
-    history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()
-
-    # convert the tokens to text, and then split the responses into the right format
-    response = tokenizer.decode(history[0]).split("<|endoftext|>")
-    response = [(response[i], response[i+1]) for i in range(0, len(response)-1, 2)] # convert to tuples of list
-    """
-
-    sentence1 = tokenize(sentence)
-    X = bag_of_words(sentence1, all_words)
+    sentence = tokenize(message)
+    X = bag_of_words(sentence, all_words)
     X = X.reshape(1, X.shape[0])
     X = torch.from_numpy(X).to(device)
 
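
Note: the first hunk removes "import torch" even though device = torch.device("cpu") still references it, so the import presumably survives elsewhere in the repo's setup or would need to be restored. The last hunk also cuts off mid-function; in the usual PyTorch intent-classifier pattern this app follows, predict would continue with a forward pass, a softmax confidence check, and a response lookup in intents.json. Below is a minimal sketch under that assumption (the tags list, the 0.75 threshold, the fallback reply, and the Gradio-style (history, history) return are assumptions, not part of this commit):

# Hypothetical full predict(), sketched for context; only the lines shown in
# the diff above are from this commit. Assumes all_words, tags, model, device,
# and intents are loaded at module level as in the surrounding hunks.
def predict(message, history):
    history = history or []
    sentence = tokenize(message)
    X = bag_of_words(sentence, all_words)
    X = X.reshape(1, X.shape[0])
    X = torch.from_numpy(X).to(device)

    output = model(X)                        # forward pass over the bag-of-words vector
    _, predicted = torch.max(output, dim=1)  # index of the highest-scoring intent
    tag = tags[predicted.item()]

    probs = torch.softmax(output, dim=1)     # logits -> probabilities
    prob = probs[0][predicted.item()]

    response = "I do not understand..."      # assumed fallback reply
    if prob.item() > 0.75:                   # assumed confidence threshold
        for intent in intents['intents']:
            if tag == intent["tag"]:
                response = random.choice(intent['responses'])

    history.append((message, response))      # assumed Gradio chat-tuple format
    return history, history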