Update README.md
Browse files
README.md
CHANGED
@@ -33,19 +33,19 @@ This modelcard aims to be a base template for new models. It has been generated
|
|
33 |
|
34 |
## Uses
|
35 |
|
36 |
-
from transformers import AutoModelForCausalLM, AutoTokenizer
|
37 |
-
import torch
|
38 |
|
39 |
|
40 |
-
tokenizer = AutoTokenizer.from_pretrained("finex/Stage-IOTGraphic")
|
41 |
-
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
|
42 |
|
43 |
-
|
44 |
-
|
45 |
# encode the new user input, add the eos_token and return a tensor in Pytorch
|
46 |
new_user_input_ids = tokenizer.encode(input(">> User:") + tokenizer.eos_token, return_tensors='pt')
|
47 |
|
48 |
-
|
49 |
bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if step > 0 else new_user_input_ids
|
50 |
|
51 |
# generated a response while limiting the total chat history to 1000 tokens,
|
|
|
33 |
|
34 |
## Uses
|
35 |
|
36 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
37 |
+
import torch
|
38 |
|
39 |
|
40 |
+
tokenizer = AutoTokenizer.from_pretrained("finex/Stage-IOTGraphic")
|
41 |
+
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
|
42 |
|
43 |
+
# Let's chat for 5 lines
|
44 |
+
for step in range(5):
|
45 |
# encode the new user input, add the eos_token and return a tensor in Pytorch
|
46 |
new_user_input_ids = tokenizer.encode(input(">> User:") + tokenizer.eos_token, return_tensors='pt')
|
47 |
|
48 |
+
# append the new user input tokens to the chat history
|
49 |
bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if step > 0 else new_user_input_ids
|
50 |
|
51 |
# generate a response while limiting the total chat history to 1000 tokens,
|