|
import gradio as gr |
|
from transformers import pipeline |
|
import torch |
|
import spaces |
|
|
|
# Hugging Face pipelines address devices by integer index:
# CUDA device 0 when a GPU is present, -1 for CPU.
if torch.cuda.is_available():
    device = 0
else:
    device = -1
|
|
|
@spaces.GPU
def generate_response(user_input, history):
    """Generate a chat reply for *user_input*, conditioned on the conversation so far.

    Parameters
    ----------
    user_input : str
        The latest message typed by the user.
    history : list
        Gradio ChatInterface history from earlier turns — either
        (user, assistant) tuples or {"role", "content"} dicts depending on
        the ChatInterface mode.  Previously ignored; now folded into the
        prompt so the model sees the full conversation.

    Returns
    -------
    str
        The assistant's reply text.
    """
    # Build the pipeline once and cache it on the function object: creating a
    # text-generation pipeline loads the model weights, which is far too slow
    # to repeat on every chat turn.
    pipe = getattr(generate_response, "_pipe", None)
    if pipe is None:
        pipe = pipeline(
            "text-generation",
            model="explorewithai/ChatFrame-Uncensored-Instruct-Small",
            device=device,
        )
        generate_response._pipe = pipe

    # Rebuild the chat transcript from history so the model has context.
    messages = []
    for turn in history or []:
        if isinstance(turn, dict):
            # Messages-format history: already {"role": ..., "content": ...}.
            messages.append(turn)
        else:
            # Tuple-format history: (user_message, assistant_message).
            user_msg, assistant_msg = turn[0], turn[1]
            if user_msg:
                messages.append({"role": "user", "content": user_msg})
            if assistant_msg:
                messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": user_input})

    # max_new_tokens bounds only the generated reply; the original
    # max_length=512 counted the prompt too, so a long conversation could
    # leave no room to answer (or error once the prompt alone passed 512).
    response = pipe(messages, max_new_tokens=512)

    # The pipeline returns the transcript with the model's reply appended;
    # take the last message rather than the hard-coded index [1], which only
    # held for a single-turn prompt with no history.
    return response[0]["generated_text"][-1]["content"]
|
|
|
# Wire the generator into Gradio's ready-made chat widget.
iface = gr.ChatInterface(
    generate_response,
    title="Text Generation Chatbot",
    description="Enter your text and get a generated response from the model.",
)

# Start the web server (blocks until the app is shut down).
iface.launch()
|
|