from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Hugging Face model id: the distilled 400M-parameter BlenderBot chatbot.
model_name = "facebook/blenderbot-400M-distill"

# Load the tokenizer and seq2seq model once at module import time.
# Downloads the weights on first run, then reads from the local HF cache.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
|
| |
|
| |
|
def generate_response(user_input):
    """Generate one chatbot reply for ``user_input``.

    Uses the module-level ``tokenizer`` and ``model``.

    Args:
        user_input: The user's message as a plain string.

    Returns:
        The model's decoded response string, with special tokens stripped.
    """
    # Tokenize to PyTorch tensors ("pt"), run generation, then decode the
    # first (and only) sequence in the batch back to text.
    inputs = tokenizer(user_input, return_tensors="pt")
    bot_output = model.generate(**inputs)
    bot_response = tokenizer.decode(bot_output[0], skip_special_tokens=True)
    return bot_response
|
| |
|
| |
|
# Simple interactive REPL: read user turns until "exit", print the bot's
# reply each turn, and keep a transcript of the whole conversation.
conversation_history = []

while True:
    user_input = input("User: ")

    # Check the exit command *before* recording the turn so the literal
    # "exit" string never ends up in the transcript.
    if user_input.lower() == "exit":
        break

    # Prefix user turns the same way bot turns are prefixed, so the
    # transcript entries are symmetric and unambiguous.
    conversation_history.append(f"User: {user_input}")

    bot_response = generate_response(user_input)
    conversation_history.append(f"Bot: {bot_response}")
    print(f"Bot: {bot_response}")

print("Updated Conversation History:", conversation_history)
|
| |
|