import re

import gradio as gr
from datasets import Dataset
from shared_resources import shared_resources
from phi.agent import Agent, RunResponse
from phi.model.huggingface import HuggingFaceChat

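# Note: `shared_resources` is assumed to provide a preloaded SentenceTransformer
# (`shared_resources.sentence_transformer`) and a Hugging Face `datasets.Dataset`
# (`shared_resources.data`) carrying an "embeddings" FAISS index plus "username"
# and "Caption" columns; this is inferred from how the attributes are used below.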


class ResponseGenerator:
    def __init__(self):
        self.ST = shared_resources.sentence_transformer
        self.data = shared_resources.data
        self.original_query = ""

    def search(self, query: str, usernames=None, k: int = 3):
        """Retrieve video captions similar to the query, optionally filtered by username."""
        self.embedded_query = self.ST.encode(query)  # Embed the user input
        self.all_retrievals = []
        if usernames:
            dataset = self.data.to_pandas()
            for username in usernames:
                # Restrict the search space to this username's videos
                filtered_df = dataset[dataset['username'] == username]
                self.temp_data = Dataset.from_pandas(filtered_df)
                self.temp_data = self.temp_data.add_faiss_index("embeddings")
                # Search for the top-k nearest captions within this subset
                self.scores, self.retrieved_examples = self.temp_data.get_nearest_examples(
                    "embeddings", self.embedded_query, k=k
                )
                # Keep only the closest caption for each username
                self.all_retrievals.append(str(self.retrieved_examples['Caption'][0]))
                self.temp_data = None
            print('All retrievals are:', self.all_retrievals)
            return self.all_retrievals

        # No username filter: search the full dataset and return scores with examples
        self.scores, self.retrieved_examples = self.data.get_nearest_examples(
            "embeddings", self.embedded_query, k=k
        )
        return self.scores, self.retrieved_examples


    def generate_response(self, query, username=None, additional_focus=None):
        """
        Generates a video storyline using the Meta-Llama-3-8B-Instruct model.
        `username` may be a list of usernames selected in the UI.
        """
        self.original_query = query  # Save the original query for future focus

        # If we are going deeper, centre the prompt on the additional focus sentence
        if additional_focus:
            prompt = f"""
        I want to create a detailed storyline for a video primarily focusing on the sentence: **{additional_focus}**, keeping it under 500 words. Please provide the storyline in 6 concise paragraphs that must incorporate the following key features every time:

        1. **Story:** How to introduce the scene and set the tone. What is happening in the scene? Describe key visuals and actions.
        2. **Narration or Voiceover:** Suggestions for narration or voiceover that complements the visuals.
        3. **Text in the Video:** Propose important text overlays for key moments.
        4. **Transitions:** Smooth transitions between scenes to maintain flow.
        5. **Emotional Tone:** The mood and energy of the scenes (e.g., excitement, calm, tension, joy).
        6. **Key Visuals & Sounds:** Important props, locations, sound effects, or background music to enhance the video.

        The storyline should flow naturally, without repeating the same information or listing individual features. Ensure the output is engaging and cohesive.

        Also, suggest **5 relevant hashtags** for the video that reflect its content and themes commonly used on social media for similar videos.
        """

        else:
            prompt = f"""
        I want to create a detailed storyline for a video in any domain, keeping it under 500 words. Please provide the storyline in 6 concise paragraphs that must incorporate the following key features every time:

        1. **Story:** How to introduce the scene and set the tone. What is happening in the scene? Describe key visuals and actions.
        2. **Narration or Voiceover:** Suggestions for narration or voiceover that complements the visuals.
        3. **Text in the Video:** Propose important text overlays for key moments.
        4. **Transitions:** Smooth transitions between scenes to maintain flow.
        5. **Emotional Tone:** The mood and energy of the scenes (e.g., excitement, calm, tension, joy).
        6. **Key Visuals & Sounds:** Important props, locations, sound effects, or background music to enhance the video.

        The storyline should flow naturally, without repeating the same information or listing individual features. Ensure the output is engaging and cohesive.

        Also, suggest **5 relevant hashtags** for the video that reflect its content and themes commonly used on social media for similar videos.
        """

        # If usernames were selected, retrieve one similar caption per username
        # and append it to the prompt as reference context.
        if username:
            retrieved_list = self.search(query, username, 1)
            retrieved_context = "\n".join(retrieved_list)
            prompt += f"\nHere is a reference video story retrieved from the dataset. Use it for analysis only, not to generate a similar story. This is the story:\n{retrieved_context}"

        prompt += f"\nNow, finally, I am providing you a question to create a detailed storyline for a video. The question is: **{self.original_query}**"

        agent = Agent(
            model=HuggingFaceChat(
                id="meta-llama/Meta-Llama-3-8B-Instruct",
                max_tokens=4096,
            ),
            markdown=True
        )

        # Run the agent and return the generated storyline
        run: RunResponse = agent.run(prompt)
        return run.content



    def extract_topics(self, story):
        """
        Asks the LLM agent for exactly 5 brainstorming sentences based on the generated storyline.
        """
        prompt = f'''I want to brainstorm ways to diversify or improve a storyline in exactly 5 sentences. No more than 5 nor less than 5.
The goal is to generate creative and actionable ideas that are not already in the storyline, on how the storyline can be expanded or modified for better engagement.
For example: If the storyline is about creating a promotional video for a restaurant, the new suggestions might include:
- I want to showcase the chef preparing a signature dish.
- I want to add a sequence of customers sharing their experiences at the restaurant.
- I want to highlight the farm-to-table sourcing of ingredients with a short segment showing local farms.
- I want to include a time-lapse of the restaurant transforming from day to night, capturing its unique ambiance.
- I want to feature a quick interview with the owner sharing the story behind the restaurant.

Now, I will provide you with the storyline. The storyline is:\n{story}
Please remember, don't give any introduction or explanations. Just generate 5 sentences directly, focusing on creative suggestions for diversifying or modifying the storyline.'''

        agent = Agent(
            model=HuggingFaceChat(
                id="meta-llama/Meta-Llama-3-8B-Instruct",
                max_tokens=4096,
            ),
            markdown=True
        )

        # Run the agent and split its output into individual sentences
        run: RunResponse = agent.run(prompt)
        generated_text = run.content
        sentences = [sentence.strip() for sentence in re.split(r'[.?]', generated_text) if sentence.strip()]
        print('The sentences are:', sentences)

        # Return the last five sentences (the prompt asks for exactly five)
        return sentences[-5:]


    def on_select_topic(self, selected_topic, history_stack, current_state):
        """
        Generates new points for the selected topic and updates history.
        """
        # Save current state in history
        history_stack.append(current_state)

        # Generate new outputs with the selected topic as additional focus
        new_response = self.generate_response(self.original_query, additional_focus=selected_topic)
        new_topics = self.extract_topics(new_response)

        # Prepare new state
        new_state = {
            "response": new_response,
            "topics": new_topics,
            "key_topics": new_topics
        }

        return new_state, history_stack, gr.update(value=new_response), gr.update(choices=new_topics)

    def on_back(self, history_stack):
        """
        Restores the previous state for all outputs.
        """
        if history_stack:
            # Pop the last state from history
            previous_state = history_stack.pop()

            return history_stack, gr.update(value=previous_state["response"]), \
                gr.update(choices=previous_state["key_topics"])

        # If no history, clear outputs
        return history_stack, gr.update(value=""), gr.update(choices=[])

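# Example standalone usage of ResponseGenerator (a minimal sketch, assuming the
# shared resources are loaded and valid Hugging Face credentials are available
# to HuggingFaceChat):
#
#     generator = ResponseGenerator()
#     story = generator.generate_response("I want to create a promotional video of Begnas Lake.")
#     ideas = generator.extract_topics(story)
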
class ResponseGeneratorApp:
    def __init__(self):
        self.point_generator = ResponseGenerator()

    def build_ui(self):
        with gr.Blocks() as demo:
            gr.Markdown(
                """
                # Brainstorming App
                Enter a query to generate a detailed response and start brainstorming for further exploration.
                """
            )

            query_input = gr.Textbox(
                label="Enter your query",
                placeholder="Type a query, e.g., 'I want to create a promotional video of Begnas Lake.'",
                lines=2,
            )

            usernames = [
                "_travelwithsapana", "givina_9", "rajen.rb", "wh0z.khu5h1", "palam061",
                "prettiest_sky", "explorepokhara", "ggkaam610", "anjana_dhl1"
            ]

            username_inputs = gr.CheckboxGroup(
                choices=usernames,
                label="Choose one or more usernames whose stories you want used as reference:",
                type="value"
            )

            generate_btn = gr.Button(value="Generate")

            # Output box for the generated text
            response_output = gr.Textbox(
                label="Generated Response",
                lines=10,
                interactive=False
            )

            # Dynamic radio buttons area for the extracted topics
            topics_radio = gr.Radio(
                label="Brain Stroming Areas....",
                choices=[],
                type="value",
                interactive=True
            )

            back_btn = gr.Button(value="Back")

            # State for managing current topics and history
            current_state = gr.State({})  # Store response, topics, and key_topics
            history_stack = gr.State([])  # Stack of previous states

            # Link the generate button to the processing function
            generate_btn.click(
                fn=self.generate_handler,
                inputs=[query_input, username_inputs],
                outputs=[current_state, response_output, topics_radio],
            )

            # Handle selection of a topic (generate new stage)
            topics_radio.change(
                fn=self.point_generator.on_select_topic,
                inputs=[topics_radio, history_stack, current_state],
                outputs=[current_state, history_stack, response_output, topics_radio]
            )

            # Handle back button
            back_btn.click(
                fn=self.point_generator.on_back,
                inputs=[history_stack],
                outputs=[history_stack, response_output, topics_radio]
            )

        return demo

    def generate_handler(self, query, usernames):
        """
        Handles the generation of the response and topics.
        """
        response = self.point_generator.generate_response(query, usernames)
        topics = self.point_generator.extract_topics(response)

        # Prepare the current state
        current_state = {
            "response": response,
            "topics": topics,
            "key_topics": topics
        }

        return current_state, gr.update(value=response), gr.update(choices=topics)
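

# Entry point (a minimal sketch, not part of the original wiring): builds the
# Gradio UI defined above and launches it locally. Launch arguments such as
# host, port, or `share` are assumptions; adjust them for your deployment.
if __name__ == "__main__":
    app = ResponseGeneratorApp()
    demo = app.build_ui()
    demo.launch()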