Spaces:
Sleeping
Sleeping
File size: 1,565 Bytes
7adffbd 9a70ef5 7b418fe 3e757dd 4c075d3 ff85747 4c075d3 3103fe2 c32326e 3e757dd cbe0829 c0159bb 05622ff c0159bb 3e757dd cbe0829 c0159bb ff85747 cbe0829 ceba6e4 c0159bb cbe0829 3e757dd ff85747 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 |
import os

import gradio as gr
import torch
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Retry mechanism for loading the model and tokenizer
# NOTE(review): RETRY_ATTEMPTS and TIMEOUT are defined here but never read
# anywhere in this script — dead configuration, candidates for removal or
# for actually wiring into the from_pretrained() calls below.
RETRY_ATTEMPTS = 10
TIMEOUT = 20 # Increase the timeout to 20 seconds
# Load model directly
# NOTE(review): this import duplicates the transformers import at the top of
# the file; AutoModel is no longer used after the fix below.
from transformers import AutoTokenizer, AutoModel
# Load the DialoGPT checkpoint the UI advertises. The previous code loaded
# "emrecan/bert-base-turkish-cased-mean-nli-stsb-tr" via AutoModel — an
# encoder-only BERT sentence-embedding model with no language-modeling head —
# so the model.generate() call in predict() could not produce chat responses.
# AutoModelForCausalLM (already imported at the top of the file) returns a
# model that supports .generate().
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
def predict(input_text):
    """Generate a single-turn response for the given input text.

    Args:
        input_text: The user's message as a plain string.

    Returns:
        str: The decoded model reply with special tokens removed.
    """
    inputs = tokenizer(input_text, return_tensors="pt")
    # Inference only — disable autograd so no gradient buffers are allocated.
    with torch.no_grad():
        outputs = model.generate(
            inputs.input_ids,
            # Pass the attention mask explicitly: omitting it makes
            # transformers emit a warning and can mis-handle padded input.
            attention_mask=inputs.attention_mask,
            max_length=200,
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
def chatbot(input_text):
    """Single-turn Gradio chat handler.

    Args:
        input_text: The user's message.

    Returns:
        A one-element history: ``[(user_message, bot_reply)]``.
    """
    return [(input_text, predict(input_text))]
# Create Gradio interface: a single-turn chat UI with submit/clear controls.
iface = gr.Blocks()
with iface:
    gr.Markdown("# 🤖 Simple DialoGPT Chatbot")
    gr.Markdown("### A basic single-turn chatbot using DialoGPT")
    chatbot_interface = gr.Chatbot(label="Chat with AI")
    msg = gr.Textbox(label="Enter your message here")
    submit_btn = gr.Button("Submit")
    clear_btn = gr.Button("Clear Conversation")
    # Submit routes the textbox text through chatbot() and replaces the
    # displayed history with the returned single (user, bot) pair.
    submit_btn.click(chatbot, [msg], chatbot_interface)
    # Clear resets the history to empty; queue=False runs it immediately
    # without going through the request queue.
    clear_btn.click(lambda: [], None, chatbot_interface, queue=False)
# Fixed: removed the stray trailing "|" after launch() — it was a
# copy/paste artifact and a SyntaxError.
iface.launch()