# Brain-Stroming-Story-Gen / brain_strom_with_influencer_input.py
import re

import gradio as gr
# from transformers import AutoTokenizer, AutoModelForCausalLM
from datasets import Dataset
from shared_resources import shared_resources
from phi.agent import Agent, RunResponse
from phi.model.huggingface import HuggingFaceChat
from phi.tools.duckduckgo import DuckDuckGo  # only needed if the commented-out DuckDuckGo tool is re-enabled
class ResponseGenerator:
    def __init__(self):
        # Shared sentence-transformer encoder and FAISS-indexed dataset, initialised once in shared_resources
        self.ST = shared_resources.sentence_transformer
        self.data = shared_resources.data
        self.original_query = ""  # Remembered so follow-up "focus" prompts reuse the same query
    # Retrieval helper backed by the shared FAISS index
    def search(self, query: str, usernames=None, k: int = 3):
        """Retrieve the captions of the videos most similar to the query.

        If `usernames` is given, the best match is collected per username and a list
        of captions is returned; otherwise the raw (scores, examples) pair from the
        full index is returned.
        """
        self.embedded_query = self.ST.encode(query)  # Embed the user input
        self.all_retrievals = []
        if usernames:
            dataset = self.data.to_pandas()
            for username in usernames:
                # Restrict the dataset to one creator and rebuild a FAISS index over their videos
                filtered_df = dataset[dataset['username'] == username]
                self.temp_data = Dataset.from_pandas(filtered_df)
                self.temp_data = self.temp_data.add_faiss_index("embeddings")
                # Search for the top k results within this creator's videos
                self.scores, self.retrieved_examples = self.temp_data.get_nearest_examples(
                    "embeddings", self.embedded_query, k=k
                )
                self.all_retrievals.append(str(self.retrieved_examples['Caption'][0]))
                self.temp_data = None
            print('All retrievals are:', self.all_retrievals)
            return self.all_retrievals
        # No username filter: search the full shared index
        self.scores, self.retrieved_examples = self.data.get_nearest_examples(
            "embeddings", self.embedded_query, k=k
        )
        return self.scores, self.retrieved_examples
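    # A minimal usage sketch with hypothetical values, assuming shared_resources already
    # exposes the encoder and a dataset with 'username', 'Caption', and 'embeddings' columns:
    #
    #     generator = ResponseGenerator()
    #     captions = generator.search(
    #         "promotional video of Begnas Lake",
    #         usernames=["explorepokhara", "_travelwithsapana"],
    #         k=3,
    #     )
    #     # -> a list with one best-matching caption per selected username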
    def generate_response(self, query, username=None, additional_focus=None):
        """Generate a video storyline using the Meta-Llama-3-8B-Instruct model."""
        self.original_query = query  # Save the original query so follow-up "focus" prompts reuse it
# If we are going deeper, add the additional focus to the prompt
if additional_focus:
# prompt = f"Explain the Given topic:\n{self.original_query}. Also focus on: {additional_focus}\n1."
prompt = f"""
I want to create a detailed storyline for a video primarily focusing on the sentence: **{additional_focus}**, keeping it under 500 words. Please provide the storyline in 6 concise paragraphs that must incorporate the following key features every time:
1. **Story:** How to introduce the scene and set the tone. What is happening in the scene? Describe key visuals and actions.
2. **Narration or Voiceover:** Suggestions for narration or voiceover that complements the visuals.
3. **Text in the Video:** Propose important text overlays for key moments.
4. **Transitions:** Smooth transitions between scenes to maintain flow.
5. **Emotional Tone:** The mood and energy of the scenes (e.g., excitement, calm, tension, joy).
6. **Key Visuals & Sounds:** Important props, locations, sound effects, or background music to enhance the video.
The storyline should flow naturally, without repeating the same information or listing individual features. Ensure the output is engaging and cohesive.
Also, suggest **5 relevant hashtags** for the video that reflect its content and themes commonly used on social media for similar videos.
"""
else:
# prompt = f"Explain the Given topic:\n{query}\n1."
prompt = f"""
I want to create a detailed storyline for a video in any domain, keeping it under 500 words. Please provide the storyline in 6 concise paragraphs that must incorporate the following key features every time:
1. **Story:** How to introduce the scene and set the tone. What is happening in the scene? Describe key visuals and actions.
2. **Narration or Voiceover:** Suggestions for narration or voiceover that complements the visuals.
3. **Text in the Video:** Propose important text overlays for key moments.
4. **Transitions:** Smooth transitions between scenes to maintain flow.
5. **Emotional Tone:** The mood and energy of the scenes (e.g., excitement, calm, tension, joy).
6. **Key Visuals & Sounds:** Important props, locations, sound effects, or background music to enhance the video.
The storyline should flow naturally, without repeating the same information or listing individual features. Ensure the output is engaging and cohesive.
Also, suggest **5 relevant hashtags** for the video that reflect its content and themes commonly used on social media for similar videos.
"""
        # Optionally ground the prompt with one retrieved caption per selected username
        if username:
            retrieved_list = self.search(query, username, 1)
            retrieved_context = "\n".join(retrieved_list)
            prompt = prompt + f"\nHere is a similar video story from the dataset. Use it for analysis only, not as a template to imitate. This is the story:\n{retrieved_context}"
        prompt = prompt + f"\nNow, finally, here is the question for which to create a detailed storyline for a video. The question is: **{self.original_query}**"
agent = Agent(
model=HuggingFaceChat(
id="meta-llama/Meta-Llama-3-8B-Instruct",
max_tokens=4096,
),
# tools=[DuckDuckGo()],
markdown=True
)
# Get the response in a variable
run: RunResponse = agent.run(prompt)
return run.content
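    # A minimal usage sketch with hypothetical values, assuming Hugging Face credentials
    # are configured for phi's HuggingFaceChat model:
    #
    #     generator = ResponseGenerator()
    #     story = generator.generate_response(
    #         "I want to create a promotional video of Begnas Lake.",
    #         username=["explorepokhara"],
    #     )
    #     print(story)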
    def extract_topics(self, story):
        """Ask the model for 5 brainstorming sentences that diversify the generated storyline."""
        prompt = f'''I want to brainstorm ways to diversify or improve a storyline in exactly 5 sentences. No more than 5 and no fewer than 5.
The goal is to generate creative and actionable ideas, not already present in the storyline, on how it can be expanded or modified for better engagement.
For example: If the storyline is about creating a promotional video for a restaurant, the new suggestions might include:
- I want to showcase the chef preparing a signature dish.
- I want to add a sequence of customers sharing their experiences at the restaurant.
- I want to highlight the farm-to-table sourcing of ingredients with a short segment showing local farms.
- I want to include a time-lapse of the restaurant transforming from day to night, capturing its unique ambiance.
- I want to feature a quick interview with the owner sharing the story behind the restaurant.
Now, I will provide you with the storyline. The storyline is:\n{story}
Please remember, don't give any introduction or explanations. Just generate 5 sentences directly, focusing on creative suggestions for diversifying or modifying the storyline. '''
agent = Agent(
model=HuggingFaceChat(
id="meta-llama/Meta-Llama-3-8B-Instruct",
max_tokens=4096,
),
# tools=[DuckDuckGo()],
markdown=True
)
# Get the response in a variable
run: RunResponse = agent.run(prompt)
        generated_text = run.content
        # Split the model output into sentences and strip whitespace from each
        sentences = [sentence.strip() for sentence in re.split(r'[.?]', generated_text) if sentence.strip()]
        print('The sentences are:', sentences)
        # Keep the last 5 sentences, matching the 5 suggestions requested in the prompt
        return sentences[-5:]
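    # A minimal usage sketch with a hypothetical storyline, assuming the model call succeeds:
    #
    #     suggestions = generator.extract_topics(story)
    #     # -> e.g. ["I want to showcase the chef preparing a signature dish", ...]
    #     # These sentences become the radio-button choices for the next brainstorming round.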
def on_select_topic(self, selected_topic, history_stack, current_state):
"""
Generates new points for the selected topic and updates history.
"""
# Save current state in history
history_stack.append(current_state)
# Generate new outputs with the selected topic as additional focus
new_response = self.generate_response(self.original_query, additional_focus=selected_topic)
new_topics = self.extract_topics(new_response)
# Prepare new state
new_state = {
"response": new_response,
"topics": new_topics,
"key_topics": new_topics
}
return new_state, history_stack, gr.update(value=new_response), gr.update(choices=new_topics)
def on_back(self, history_stack):
"""
Restores the previous state for all outputs.
"""
if history_stack:
# Pop the last state from history
previous_state = history_stack.pop()
return history_stack, gr.update(value=previous_state["response"]), \
gr.update(choices=previous_state["key_topics"])
# If no history, clear outputs
return history_stack, gr.update(value=""), gr.update(choices=[])
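    # Shape of the state dictionaries pushed onto history_stack (built by on_select_topic
    # above and generate_handler below):
    #
    #     {
    #         "response": "<generated storyline text>",
    #         "topics": ["I want to ...", ...],      # current radio choices
    #         "key_topics": ["I want to ...", ...],  # same list, kept for restoring the radio on "Back"
    #     }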
class ResponseGeneratorApp:
def __init__(self):
self.point_generator = ResponseGenerator()
def build_ui(self):
with gr.Blocks() as demo:
gr.Markdown(
"""
# Brainstorming App
Enter a query to generate a detailed response and start brainstorming for further exploration.
"""
)
query_input = gr.Textbox(
label="Enter your query",
placeholder="Type a query, e.g., 'I want to create a promotional video of Begnas Lake.'",
lines=2,
)
usernames = [
"_travelwithsapana", "givina_9", "rajen.rb", "wh0z.khu5h1", "palam061",
"prettiest_sky", "explorepokhara", "ggkaam610", "anjana_dhl1"
]
            # username_inputs = gr.Radio(label="Select a username whose style you want a similar story from:", choices=usernames, type="value")
            username_inputs = gr.CheckboxGroup(choices=usernames, label="Choose one or more usernames whose style you want a similar story from:", type="value")
generate_btn = gr.Button(value="Generate")
# Output box for the generated text
response_output = gr.Textbox(
label="Generated Response",
lines=10,
interactive=False
)
# Dynamic radio buttons area for the extracted topics
topics_radio = gr.Radio(
label="Brain Stroming Areas....",
choices=[],
type="value",
interactive=True
)
back_btn = gr.Button(value="Back")
# State for managing current topics and history
current_state = gr.State({}) # Store response, topics, and key_topics
history_stack = gr.State([]) # Stack of previous states
# Link the generate button to the processing function
generate_btn.click(
fn=lambda query,usernames: self.generate_handler(query,usernames),
inputs=[query_input,username_inputs],
outputs=[current_state, response_output, topics_radio],
)
# Handle selection of a topic (generate new stage)
topics_radio.change(
fn=self.point_generator.on_select_topic,
inputs=[topics_radio, history_stack, current_state],
outputs=[current_state, history_stack, response_output, topics_radio]
)
# Handle back button
back_btn.click(
fn=self.point_generator.on_back,
inputs=[history_stack],
outputs=[history_stack, response_output, topics_radio]
)
return demo
def generate_handler(self, query,usernames):
"""
Handles the generation of the response and topics.
"""
response = self.point_generator.generate_response(query,usernames)
topics = self.point_generator.extract_topics(response)
# Prepare the current state
current_state = {
"response": response,
"topics": topics,
"key_topics": topics
}
return current_state, gr.update(value=response), gr.update(choices=topics)
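

# Minimal entry-point sketch (an assumption, not part of the original file): launches the
# Gradio app when this module is run directly, provided shared_resources has already
# loaded the sentence transformer and the FAISS-indexed dataset.
if __name__ == "__main__":
    app = ResponseGeneratorApp()
    demo = app.build_ui()
    demo.launch()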