# Requires: pip install torch transformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


def main():
    # Load the DialoGPT-small checkpoint and its tokenizer from the Hugging Face Hub.
    model_name = "microsoft/DialoGPT-small"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    # Run on the GPU when one is available, otherwise fall back to the CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)

    print("Type 'quit' to exit.")
    chat_history_ids = None

    # Chat for at most 20 turns; typing "quit" ends the session early.
    for step in range(20):
        user = input("You: ").strip()
        if user.lower() == "quit":
            break

        # Encode the user turn; DialoGPT uses the EOS token as a turn separator.
        new_user_input_ids = tokenizer.encode(user + tokenizer.eos_token, return_tensors="pt").to(device)

        # Prepend the accumulated history so the model conditions on the whole conversation.
        bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if chat_history_ids is not None else new_user_input_ids

        # max_length caps the total sequence (history + reply); reusing EOS as the
        # pad token avoids the "no pad token" warning for GPT-2-style models.
        chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)

        # Decode only the newly generated tokens, i.e. everything after the prompt.
        response = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
        print("Bot:", response)
if __name__ == "__main__":
    main()