import gradio as gr
from transformers import pipeline, BartTokenizer, BartForConditionalGeneration

# Initialize the image classification pipeline
classifier = pipeline("image-classification", model="google/vit-base-patch16-224")

# Initialize the tokenizer and model for generative text (BART encoder-decoder model)
model_name = "facebook/bart-large-cnn"  # BART model used for tweet generation, matching the demo description
tokenizer = BartTokenizer.from_pretrained(model_name)
model = BartForConditionalGeneration.from_pretrained(model_name)


def generate_tweet(label):
    # Craft a prompt that naturally encourages engaging and relevant tweet content
    prompt = f"write a tweet about {label}"
    inputs = tokenizer.encode(prompt, return_tensors="pt")
    # max_length is counted in tokens; it serves here as a rough proxy for the 280-character tweet limit
    outputs = model.generate(inputs, max_length=280, num_return_sequences=1, no_repeat_ngram_size=2)
    tweet = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return tweet


def predict(image):
    predictions = classifier(image)
    # Sort predictions by confidence and select the top one
    top_prediction = sorted(predictions, key=lambda x: x["score"], reverse=True)[0]
    label = top_prediction["label"].split(",")[0]  # Keep only the first label variant (ImageNet labels can list synonyms)
    # Generate the tweet for the predicted label
    tweet = generate_tweet(label)
    return tweet


title = "Image Classifier to Generative Tweet"
description = (
    "This demo classifies images using the 'google/vit-base-patch16-224' model and "
    "generates a tweet about the top prediction using the 'facebook/bart-large-cnn' "
    "generative text model."
)

input_component = gr.Image(type="pil", label="Upload an image here")
output_component = gr.Textbox(label="Generated Promotional Tweet")

gr.Interface(
    fn=predict,
    inputs=input_component,
    outputs=output_component,
    title=title,
    description=description,
).launch()
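
# Optional quick check of the prediction pipeline without the web UI.
# This is a minimal sketch with a hypothetical local file path ("example.jpg");
# run it from a REPL or a separate script, since .launch() above blocks.
#
#     from PIL import Image
#     print(predict(Image.open("example.jpg")))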