File size: 1,863 Bytes
606799e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 |
def chat_with_model():
    """Run an interactive question-answering loop on the console.

    Prompts for a question and a context passage, feeds both to the
    module-level `question_answerer` pipeline, and prints the answer with
    its confidence score. Typing 'exit' at either prompt ends the session.
    """
    print("Welcome to the Question Answering Chatbot! (type 'exit' to quit)")
    while True:
        user_question = input("You: ")
        if user_question.lower() == 'exit':
            print("Goodbye!")
            return
        user_context = input("Context: ")
        if user_context.lower() == 'exit':
            print("Goodbye!")
            return
        # NOTE(review): `question_answerer` is assumed to be a module-level
        # HF pipeline defined elsewhere in this file — confirm it is in scope.
        result = question_answerer(question=user_question, context=user_context)
        print(f"Bot: {result['answer']} (confidence: {result['score']:.2f})")
# Save a standalone copy of the chatbot in a runnable script.
# NOTE(review): this duplicates chat_with_model; keep the embedded copy in
# sync with the in-module definition above.
_CHATBOT_SCRIPT = '''\
from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering, pipeline

# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("my_awesome_qa_model")
model = TFAutoModelForQuestionAnswering.from_pretrained("my_awesome_qa_model")

# Create a pipeline for question answering
question_answerer = pipeline("question-answering", model=model, tokenizer=tokenizer)


# Define the chat function
def chat_with_model():
    print("Welcome to the Question Answering Chatbot! (type 'exit' to quit)")
    while True:
        question = input("You: ")
        if question.lower() == 'exit':
            print("Goodbye!")
            break
        context = input("Context: ")
        if context.lower() == 'exit':
            print("Goodbye!")
            break
        response = question_answerer(question=question, context=context)
        answer = response['answer']
        score = response['score']
        print(f"Bot: {answer} (confidence: {score:.2f})")


# Run the chat function
if __name__ == "__main__":
    chat_with_model()
'''

# Explicit UTF-8 so the generated file is identical across platforms
# (the default text encoding is platform-dependent).
with open('chatbot.py', 'w', encoding='utf-8') as f:
    f.write(_CHATBOT_SCRIPT)
print("Chatbot script 'chatbot.py' has been created.")
# Entry point: launch the interactive chatbot when run as a script
# (not when imported as a module).
if __name__ == "__main__":
    chat_with_model()