Update README.md
Browse files
README.md
CHANGED
```diff
@@ -103,18 +103,14 @@ class ChatBot:
         user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors="pt")

         # Concatenate the user input with chat history
-        if self.history:
+        if self.history.numel() > 0:
             chat_history_ids = torch.cat([self.history, user_input_ids], dim=-1)
         else:
             chat_history_ids = user_input_ids

         # Generate a response using the PEFT model
-        # response = peft_model.generate(chat_history_ids, max_length=512, pad_token_id=tokenizer.eos_token_id)
-        # response = peft_model.generate(chat_history_ids)
         response = peft_model.generate(input_ids=chat_history_ids, max_length=512, pad_token_id=tokenizer.eos_token_id)

-
-
         # Update chat history
         self.history = chat_history_ids
```