import gradio as gr
from transformers import pipeline, GPT2LMHeadModel, GPT2Tokenizer

# Load the fine-tuned GPT-2 model and tokenizer
model_dir = "Manasa1/finetuned_GPT23"
fine_tuned_model = GPT2LMHeadModel.from_pretrained(model_dir)
fine_tuned_tokenizer = GPT2Tokenizer.from_pretrained(model_dir)

# Create a text-generation pipeline
generator = pipeline("text-generation", model=fine_tuned_model, tokenizer=fine_tuned_tokenizer)

# Add relevant hashtags and an emoji based on the topic of the input question
def add_relevant_tags(tweet, input_question):
    # Pre-defined mappings of topics to hashtags and emojis
    topic_to_hashtags = {
        "startup": ["#Startups", "#Innovation", "#Entrepreneurship"],
        "AI": ["#AI", "#ArtificialIntelligence", "#Tech"],
        "technology": ["#Technology", "#Future", "#Tech"],
        "future": ["#Future", "#Vision", "#Tech"],
    }
    topic_to_emojis = {
        "startup": "🚀",
        "AI": "🤖",
        "technology": "💻",
        "future": "🌟",
    }

    # Determine the topic from the input question via keyword matching
    topic = None
    for key in topic_to_hashtags:
        if key.lower() in input_question.lower():
            topic = key
            break

    # Append an emoji and up to two hashtags if a topic was detected
    if topic:
        hashtags = " ".join(topic_to_hashtags[topic][:2])
        emoji = topic_to_emojis[topic]
        tweet = f"{tweet} {emoji} {hashtags}"

    return tweet.strip()


def generate_tweet(input_question):
    # Use the raw question as the prompt (no "Question:"/"Answer:" framing)
    prompt = input_question.strip()

    # Generate text; max_length is measured in tokens, so the result is trimmed
    # to tweet length below. do_sample=True is required for temperature/top_p to
    # take effect, and pad_token_id silences the GPT-2 "no pad token" warning.
    output = generator(
        prompt,
        max_length=300,
        num_return_sequences=1,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        pad_token_id=fine_tuned_tokenizer.eos_token_id,
    )

    # Extract the generated text
    tweet = output[0]["generated_text"]

    # Keep the tweet between roughly 200 and 280 characters
    if len(tweet) > 280:
        # Trim to 280 characters, then cut back to the last complete sentence
        tweet = tweet[:280]
        last_period = tweet.rfind(".")
        if last_period != -1:
            tweet = tweet[:last_period + 1]
    elif len(tweet) < 200:
        # Pad short outputs with trailing spaces to reach 200 characters
        tweet = tweet.ljust(200)

    # Add relevant hashtags and emojis
    tweet = add_relevant_tags(tweet, input_question)

    return tweet


# Create the Gradio interface
interface = gr.Interface(
    fn=generate_tweet,
    inputs=gr.Textbox(label="Enter a prompt/question", placeholder="Write a tweet about AI."),
    outputs=gr.Textbox(label="Generated Tweet"),
    title="Tweet Generator",
    description=(
        "Generate concise, relevant tweets enriched with appropriate emojis "
        "and hashtags using a fine-tuned GPT-2 model."
    ),
)

# Launch the interface
interface.launch()
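# Hypothetical quick check (not part of the app itself): calling
# generate_tweet() directly before interface.launch() is a fast way to
# sanity-check the model output without opening the UI, e.g.:
#
#     print(generate_tweet("Write a tweet about the future of AI startups."))
#
# When the prompt contains a known topic keyword ("AI", "startup",
# "technology", "future"), the returned tweet should end with the matching
# emoji and hashtags appended by add_relevant_tags().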