import gradio as gr
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load the model and tokenizer (the base "gpt2" checkpoint is loaded here)
model_name = "gpt2"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
def generate_pixel_art(prompt):
    # Generate text meant to be a hex-encoded 16x16 image (256 pixels * 6 hex chars = 1536 chars).
    # Note: max_length is a token budget, not a character count, and gpt2's context window is 1024 tokens.
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    output = model.generate(input_ids, max_length=2304, num_return_sequences=1)
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_text
def hex_to_rgb(hex_string):
    # Convert a 6-character "RRGGBB" string to an (R, G, B) tuple; putdata expects tuples for RGB images.
    return tuple(int(hex_string[i:i+2], 16) for i in (0, 2, 4))
def visualize_pixel_art(hex_string):
    # Interpret the string as consecutive "RRGGBB" values and render them as a 16x16 RGB image.
    pixels = [hex_to_rgb(hex_string[i:i+6]) for i in range(0, len(hex_string), 6)]
    img = Image.new('RGB', (16, 16))
    img.putdata(pixels)
    return img
def generate_and_visualize(prompt):
    # The Interface below declares two outputs, so return both the raw text and the rendered image.
    hex_string = generate_pixel_art(prompt)
    # Model output is not guaranteed to be clean hex: keep hex digits only, then pad/trim to 16*16*6 = 1536 chars.
    clean = "".join(c for c in hex_string if c in "0123456789abcdefABCDEF")
    return hex_string, visualize_pixel_art(clean.ljust(1536, "0")[:1536])

demo = gr.Interface(
    fn=generate_and_visualize,
    inputs=gr.Textbox(lines=2, placeholder="Enter a prompt for pixel art generation..."),
    outputs=[gr.Textbox(label="Generated Hex String"), gr.Image(label="Visualized Pixel Art")],
    title="LlamaSquint Pixel Art Generator",
    description="Generate pixel art using a fine-tuned LLaMa model"
)
if __name__ == "__main__":
    demo.launch()
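
A quick way to sanity-check the hex-to-image path without loading the model or Gradio (a standalone sketch; the helper is duplicated here only so the snippet runs on its own, assuming Pillow is installed):

# Standalone check: render a solid-red 16x16 test image from a hand-built hex string.
from PIL import Image

def hex_to_rgb(hex_string):
    return tuple(int(hex_string[i:i+2], 16) for i in (0, 2, 4))

sample_hex = "ff0000" * 256  # 256 pixels * "RRGGBB" = 1536 hex characters
pixels = [hex_to_rgb(sample_hex[i:i+6]) for i in range(0, len(sample_hex), 6)]
img = Image.new('RGB', (16, 16))
img.putdata(pixels)
img.save("test_pixel_art.png")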