Spaces:
Runtime error
Runtime error
File size: 1,893 Bytes
411d6fe |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 |
import gradio as gr
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
# Load GPT-2 once at import time so every request reuses the same
# tokenizer/model pair instead of re-downloading weights per call.
MODEL_NAME = "gpt2"
tokenizer = GPT2Tokenizer.from_pretrained(MODEL_NAME)
model = GPT2LMHeadModel.from_pretrained(MODEL_NAME)
# Define the coding mode function
def coding_mode(text):
    """Generate a GPT-2 reply for *text* framed as a REPL-style coding prompt.

    The user message is prefixed with ``">>> "`` before generation, and that
    prefix is stripped from the decoded output before returning.

    Args:
        text: The user's message.

    Returns:
        The sampled model continuation as a string (prompt prefix removed
        when present).
    """
    prompt = ">>> "
    input_text = prompt + text
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    # Inference only — no need to track gradients during generation.
    with torch.no_grad():
        output_ids = model.generate(input_ids, max_length=1000, do_sample=True)
    response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    # Only strip the prompt when the decoded text actually starts with it.
    # The original code sliced len(prompt) characters unconditionally, which
    # silently mangled the reply whenever decode didn't reproduce the prefix.
    if response.startswith(prompt):
        response = response[len(prompt):]
    return response
# Define the normal mode function
def normal_mode(text):
    """Return a sampled GPT-2 continuation of *text* (no prompt framing).

    Args:
        text: The user's message.

    Returns:
        The decoded model output, special tokens removed.
    """
    encoded = tokenizer.encode(text, return_tensors="pt")
    generated = model.generate(encoded, max_length=1000, do_sample=True)
    return tokenizer.decode(generated[0], skip_special_tokens=True)
# Create the Gradio interface
def _respond(text, use_coding_mode):
    """Route the message to coding_mode or normal_mode based on the toggle."""
    return coding_mode(text) if use_coding_mode else normal_mode(text)


# Build the Gradio UI. The original code used components and parameters that
# have never existed in Gradio (`gr.inputs.Chat`, `Interface.add_mode`,
# `allow_screenshot`, `allow_sharing`, `layout`) and crashed at startup; the
# "coding mode" toggle is instead implemented as a Checkbox input feeding a
# dispatcher function.
iface = gr.Interface(
    fn=_respond,
    inputs=[
        gr.Textbox(placeholder="Enter your message here...", label="Me"),
        gr.Checkbox(value=False, label="Coding Mode"),
    ],
    outputs=gr.Textbox(placeholder="Output text will appear here...", label="Chatbot"),
    title="Chatbot",
    description="Enter text and the chatbot will respond!",
    allow_flagging="never",  # boolean False is not a valid value for this parameter
    examples=[
        ["Hi there!", False],
        ["What's your name?", False],
        ["How old are you?", False],
        ["What do you like to do for fun?", False],
    ],
)
# Launch with a public share link (matches the original behavior).
iface.launch(share=True)
|