|
import torch |
|
from transformers import T5Tokenizer, T5ForConditionalGeneration |
|
|
|
|
|
# Path to the locally fine-tuned T5 checkpoint directory — must exist on disk
# (contains tokenizer files and model weights saved via save_pretrained).
model_path = "./t5-small-finetuned"

tokenizer = T5Tokenizer.from_pretrained(model_path)

model = T5ForConditionalGeneration.from_pretrained(model_path)




# Switch to inference mode: disables dropout and other train-time layers.
model.eval()
|
|
|
def chat_with_model(prompt, max_length=150, num_beams=5, early_stopping=True):
    """Generate a response from the fine-tuned T5 model for *prompt*.

    Args:
        prompt: Input text to feed to the model.
        max_length: Maximum token length of the generated sequence
            (default 150, matching the original hard-coded value).
        num_beams: Beam-search width (default 5).
        early_stopping: Stop beam search once all beams finish (default True).

    Returns:
        The decoded model output with special tokens removed.
    """
    # Tokenize the prompt into a batch of size 1 (shape: [1, seq_len]).
    input_ids = tokenizer.encode(prompt, return_tensors='pt')

    # Inference only — disable autograd to save memory and compute.
    with torch.no_grad():
        output = model.generate(
            input_ids,
            max_length=max_length,
            num_beams=num_beams,
            early_stopping=early_stopping
        )

    # output[0] is the single beam-search result for our single-item batch.
    response = tokenizer.decode(output[0], skip_special_tokens=True)

    return response
|
|
|
def main():
    """Run a simple read-eval-print chat loop until the user types 'exit'."""
    print("Chatbot is running. Type 'exit' to end the conversation.")

    # Walrus binding: read a line and keep looping until the user quits.
    while (user_input := input("You: ")).lower() != 'exit':
        print(f"Bot: {chat_with_model(user_input)}")
|
|
|
# Script entry point: only start the chat loop when run directly,
# not when this module is imported.
if __name__ == "__main__":

    main()
|
|