|
from transformers import pipeline |
|
|
|
# "Intuition" chatbot: asks the user for a question and for the answer they
# want, then lets a GPT-Neo model continue the conversation from there.
bot = pipeline('text-generation', model='EleutherAI/gpt-neo-2.7B')

while True:
    reply1 = "What is your question?"
    print(reply1)
    user_question = input()

    reply2 = "What do you want the answer to be?"
    print(reply2)
    # FIX: this used to reassign the same `user_response` variable, clobbering
    # the question so the prompt contained the desired answer twice.
    desired_answer = input()

    reply3 = "That is the answer. As your intuition is simply what you want. What is your next question?"

    # Build the prompt once so we can reliably strip it from the model output.
    prompt = "\n".join([reply1, user_question, reply2, desired_answer, reply3])

    # FIX: max_length counts the prompt tokens as well, so a prompt longer
    # than 100 tokens would leave no room to generate anything.
    # max_new_tokens bounds only the generated continuation.
    bot_response = bot(prompt, max_new_tokens=100)

    # generated_text echoes the prompt followed by the continuation.
    # FIX: the old split(reply1)/split(reply2) parsing extracted the user's
    # own question from the echoed prompt instead of the model's new text;
    # slicing off the known prompt prefix prints only the continuation.
    print(bot_response[0]['generated_text'][len(prompt):].strip())
|
|
|
|