import gradio as gr
from transformers import AutoTokenizer, VisionEncoderDecoderModel, ViTImageProcessor
from diffusers import StableDiffusionPipeline

device = 'cpu'

# Image-captioning components: the same checkpoint provides the ViT encoder,
# the GPT-2 decoder, and the matching tokenizer.
caption_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
feature_extractor = ViTImageProcessor.from_pretrained(caption_checkpoint)
tokenizer = AutoTokenizer.from_pretrained(caption_checkpoint)
caption_model = VisionEncoderDecoderModel.from_pretrained(caption_checkpoint).to(device)

# Text-to-image pipeline used to turn the generated caption back into an image.
diffusion_model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
diffusion_model = diffusion_model.to(device)
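
# Optional (assumption, not part of the original script): on a machine with a CUDA GPU
# the pipeline can be loaded in half precision and moved to the GPU instead, e.g.
#   import torch
#   diffusion_model = StableDiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
#   ).to("cuda")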


def get_caption(image):
    """Generate a natural-language caption for a PIL image."""
    image = image.convert('RGB')
    # Preprocess the image into pixel values for the ViT encoder.
    image_tensor = feature_extractor(images=image, return_tensors="pt").pixel_values.to(device)
    # Beam-search decode the caption token ids, then convert them back to text.
    caption_ids = caption_model.generate(image_tensor, max_length=128, num_beams=3)[0]
    caption_text = tokenizer.decode(caption_ids, skip_special_tokens=True)
    return caption_text


def generate_image(caption):
    """Generate an image from a text prompt with Stable Diffusion."""
    # The pipeline returns its generated PIL images via the `images` attribute.
    generated_image = diffusion_model(caption).images[0]
    return generated_image
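
# Note (assumption, not in the original script): the pipeline call also accepts tuning
# parameters such as num_inference_steps and guidance_scale, e.g.
#   diffusion_model(caption, num_inference_steps=30, guidance_scale=7.5).images[0]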


# Gradio UI: upload an image, caption it, then feed the caption back into
# Stable Diffusion to generate a new image.
title = "Image Captioning and Generation"
with gr.Blocks(title=title) as demo:
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(label="Upload any Image", type='pil')
            get_caption_btn = gr.Button("Get Caption")
        with gr.Column():
            caption_output = gr.Textbox(label="Caption")
            generate_image_btn = gr.Button("Generate Image")
    with gr.Row():
        generated_image_output = gr.Image(label="Generated Image")

    get_caption_btn.click(get_caption, inputs=image_input, outputs=caption_output)
    generate_image_btn.click(generate_image, inputs=caption_output, outputs=generated_image_output)

demo.launch()