# GPT-2 chatbot Gradio Space
# NOTE(review): this file was recovered from a Hugging Face Spaces page dump;
# the Space was showing "Runtime error" (the original used nonexistent Gradio APIs).
import gradio as gr
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel

# Load the pretrained GPT-2 language model and its tokenizer once at module
# import time, so every request reuses the same weights.
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')
# Define the coding mode function | |
def coding_mode(text):
    """Generate a model response to *text* framed as a coding/REPL prompt.

    Args:
        text: The user's message.

    Returns:
        The generated continuation with the ">>> " prefix stripped back off.
    """
    # Prepend a REPL-style prompt so the model is nudged toward code-like output.
    prompt = ">>> "
    input_text = prompt + text
    # Sample a continuation from the model. max_length counts input tokens too.
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    output_ids = model.generate(
        input_ids,
        max_length=1000,
        do_sample=True,
        # GPT-2 defines no pad token; use EOS so generate() does not warn/fail.
        pad_token_id=tokenizer.eos_token_id,
    )
    response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    # The decoded text starts with our prompt; remove it before returning.
    response = response[len(prompt):]
    return response
# Define the normal mode function | |
def normal_mode(text):
    """Generate a free-form model response to *text* (no prompt framing).

    Args:
        text: The user's message.

    Returns:
        The sampled continuation decoded to plain text.
    """
    input_ids = tokenizer.encode(text, return_tensors="pt")
    output_ids = model.generate(
        input_ids,
        max_length=1000,
        do_sample=True,
        # GPT-2 defines no pad token; use EOS so generate() does not warn/fail.
        pad_token_id=tokenizer.eos_token_id,
    )
    response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    return response
# Create the Gradio interface | |
def _respond(text, mode):
    """Dispatch a message to the selected chat mode.

    The original code tried to register a second mode via ``iface.add_mode``,
    which is not a Gradio API; a Radio input selecting the mode is the
    supported equivalent.
    """
    if mode == "Coding Mode":
        return coding_mode(text)
    return normal_mode(text)


# Build the Gradio UI. The original used ``gr.inputs.Chat`` and
# ``gr.outputs.Textbox(placeholder=...)``, neither of which exists in the
# Gradio API — that is what crashed the Space at startup. Modern Gradio
# exposes components directly as gr.Textbox / gr.Radio.
iface = gr.Interface(
    fn=_respond,
    inputs=[
        gr.Textbox(placeholder="Enter your message here...", label="Me"),
        gr.Radio(
            ["Normal Mode", "Coding Mode"],
            value="Normal Mode",
            label="Mode",
        ),
    ],
    outputs=gr.Textbox(label="Response"),
    title="Chatbot",
    description="Enter text and the chatbot will respond!",
    examples=[
        ["Hi there!", "Normal Mode"],
        ["What's your name?", "Normal Mode"],
        ["How old are you?", "Normal Mode"],
        ["What do you like to do for fun?", "Coding Mode"],
    ],
)

# Launch the web server; share=True requests a public tunnel URL.
iface.launch(share=True)