Adam-Ben-Khalifa committed on
Commit 6c9eafd
1 Parent(s): ffacf64

Update pipeline.py

Files changed (1)
  1. pipeline.py +352 -175
pipeline.py CHANGED
@@ -18,24 +18,27 @@ client = AsyncOpenAI(

# Defining the PromptEnhancer class containing the necessary components for the Advanced Prompt Generation Pipeline
class PromptEnhancer:
-    def __init__(self, model="gpt-4o-mini", temperature=0.0, tools_dict={}):
        self.model = model
        self.prompt_tokens = 0
        self.completion_tokens = 0
        self.tools_dict = tools_dict

    async def call_llm(self, prompt):
        """Call the LLM with the given prompt"""
        response = await client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system",
-                 "content": "You are an assistant designed to provide concise and specific information based solely on the given tasks.\
-                     Do not include any additional information, explanations, or context beyond what is explicitly requested."
                },
                {"role": "user",
                 "content": prompt
-                }
            ],
            temperature=0.0,  # from 0 (precise and almost deterministic answer) to 2 (creative and almost random answer)
        )
@@ -45,198 +48,372 @@ class PromptEnhancer:

        return response.choices[0].message.content

-    async def analyze_input(self, basic_prompt):
-        """Analyze the input prompt to determine its key information"""
-        analysis_prompt = f"""
-        Analyze the following {{prompt}} and generate brief answers to these key information that will be beneficial to enhance the prompt:
-        1. Main topic of the prompt
-        2. The most convenient output format for the prompt
-        3. Specific requirements for the prompt, if necessary
-        4. Suggested strategies to enhance the prompt for better output result
-
-        {{prompt}}: {basic_prompt}
-
-        Your output will be only the result of the information required above in text format.
-        Do not return a general explanation of the generation process.
-        """
-        return await self.call_llm(analysis_prompt)
-
-    async def expand_instructions(self, basic_prompt, analysis):
-        """Expand the basic prompt with clear, detailed instructions"""
-        expansion_prompt = f"""
-        Based on this {{analysis}}:
-
-        {analysis}
-
-        Expand the following {{basic_prompt}} following these instructions:
-        1. Add relevant details to clarify the prompt only if necessary
-        2. Suggest an appropriate persona for the AI Model
-        3. Generate 1-2 related examples to guide the output generation
-        4. Suggest an optimal output length
-        5. Use delimiter, {{ }}, to clearly indicate the parts of the input that should be concidered as variables
-
-        {{basic_prompt}}: {basic_prompt}
-
-        Your output will be only the result of the information required above in text format and not a dictionary format.
-        Make sure the generated output maintains the sructure of a prompt for an AI Model.
-        Make sure the generated output maintains the goal and context of the {{basic_prompt}}.
-        Do not include the instructions headers in the generated answer.
-        Do not return a general explanation of the generation process.
-        Do not generate an answer for the prompt.
-        """
-        return await self.call_llm(expansion_prompt)

-    async def decompose_task(self, expanded_prompt):
-        """Break down complex tasks into subtasks"""
-        decomposition_prompt = f"""
-        Break down the following {{prompt}} into subtasks for better output generation and follow these instructions:
-        1. Identify main task components and their corresponding subtasks
-        2. Create specific instructions for each subtask
-        3. Define success criteria for each subtask
-
-        {{prompt}}: {expanded_prompt}
-
-        Your output will be only the result of the task required above in text format.
-        Follow the (Main-task/ Sub-task/ Instructions/ Success-criteria) format.
-        Do not return a general explanation of the generation process.
-        """
-        return await self.call_llm(decomposition_prompt)

-    async def add_reasoning(self, expanded_prompt):
-        """Add instructions for showing reasoning, chain-of-thought, and self-review"""
-        reasoning_prompt = f"""
-        Based on the following {{prompt}}, suggest instructions in order to guide the AI Model to:
-        1. Show reasoning through using the chain-of-thought process
-        2. Use inner-monologue only if it is recommended to hide parts of the thought process
-        3. Self-review and check for missed information
-
-        {{prompt}}: {expanded_prompt}
-
-        Your output will be only the set of instructions in text format.
-        Do not return a general explanation of the generation process.
        """
-        return await self.call_llm(reasoning_prompt)

-    async def create_eval_criteria(self, expanded_prompt):
-        """Generate evaluation criteria for the prompt output"""
-        evaluation_prompt = f"""
-        Create evaluation criteria for assessing the quality of the output for this {{prompt}}:
-        1. List 1-3 specific criteria
-        2. Briefly explain how to measure each criterion
-
-        {{prompt}}: {expanded_prompt}
-
-        Your output will be only the result of the information required above in text format.
-        Do not return a general explanation of the generation process.
        """
-        return await self.call_llm(evaluation_prompt)

-    async def suggest_references(self, expanded_prompt):
-        """Suggest relevant references and explain how to use them"""
-        reference_prompt = f"""
-        For the following {{prompt}}, suggest relevant reference texts or sources that could help enhance the output of the prompt if possible,
-        and if not, do not return anything:
-        1. List 0-3 potential references
-        2. Briefly explain how to incorporate these references to enhance the prompt
-
-        {{prompt}}: {expanded_prompt}
-
-        Your output will be only the result of the information required above in a dictionary called "References" containing the references titles as keys,
-        and their corresponding explanation of incorporation as values. If no references will be suggested, return an empty dictionary.
-        Do not return a general explanation of the generation process.
-        """
-        return await self.call_llm(reference_prompt)

-    async def suggest_tools(self, expanded_prompt, tools_dict):
-        """Suggest relevant external tools or APIs"""
-        tool_prompt = f"""
-        For the following {{prompt}}, suggest relevant external tools from the provided {{tools_dict}} that can enhance the prompt for better execution.
-        If the prompt does not require tools for its output, it is highly-recommended to not return any tools:
-        1. List 0-3 potential tools/APIs
-        2. Briefly explain how to use these tools within the prompt
-
-        {{prompt}}: {expanded_prompt}
-        {{tools_dict}}: {tools_dict}
-
-        Your output will be only the result of the information required above in a dictionary containing the suggested tools as keys,
-        and their corresponding way of usage with the prompt as values. If no tools will be suggested, return an empty dictionary.
-        Do not return a general explanation of the generation process.
-        """
-        return await self.call_llm(tool_prompt)

-    async def assemble_prompt(self, components):
-        """Assemble all components into a cohesive advanced prompt"""
-        assembly_prompt = f"""
-        Assemble all the following {{components}} into a cohesive, and well-structured advanced prompt and do not generate a response for the prompt.
-        Make sure to combine the {{reasoning_process}} and {{subtasks}} sections into one section called {{reasoning_process_and_subtasks}}.
-
-        {{components}}: {components}
-
-        Your output will be only the result of the tasks required above,
-        which is an advanced coherent prompt generated from the combination of the given components dictionary.
-        Keep only the {{reasoning_process_and_subtasks}} section instead of the {{reasoning_process}} and {{subtasks}} sections in the output.
-        Ensure that the assembled prompt maintains the delimiter structure of variables and the suggested persona.
-        Make sure that each sub-section of the prompt is clear and has a title.
-        The output is in plain text format and not a dictionary format.
-        Do not return a general explanation of the generation process.
-        Take the return-to-line symbol into consideration.
-        Remove the "**Expanded Prompt**" header.
        """
-        return await self.call_llm(assembly_prompt)

-    async def auto_eval(self, assembled_prompt, evaluation_criteria):
-        """Perform Auto-Evaluation and Auto-Adjustment"""
-        auto_eval_prompt = f"""
-        Perform any minor adjustments on the given {{prompt}} based on how likely its output will satisfy these {{evaluation_criteria}}.
-        Only perform minor changes if it is necessary and return the updated prompt as output.
-        If no changes are necessary, do not change the prompt and return it as output.
-
-        {{prompt}}: {assembled_prompt}
-        {{evaluation_criteria}}: {evaluation_criteria}
-
-        Your output will be only the result of the tasks required above, which is an updated version of the {{prompt}}, in text format.
-        Make sure to keep the {{evaluation_criteria}} in the output prompt.
-        Do not return a general explanation of the generation process.
-        Make sure there is no generated answer for the prompt.
-        Make sure to maintain the stucture of the {{prompt}}.
-        """
-        return await self.call_llm(auto_eval_prompt)
-
-    async def enhance_prompt(self, basic_prompt, perform_eval=False):
-        """Main method to enhance a basic prompt to an advanced one"""
-        analysis = await self.analyze_input(basic_prompt)
-        expanded_prompt = await self.expand_instructions(basic_prompt, analysis)
-
-        evaluation_criteria, references, subtasks, reasoning, tools = await asyncio.gather(
-            self.create_eval_criteria(expanded_prompt),
-            self.suggest_references(expanded_prompt),
-            self.decompose_task(expanded_prompt),
-            self.add_reasoning(expanded_prompt),
-            self.suggest_tools(expanded_prompt, tools_dict={}),
        )
-
        components = {
-            "expanded_prompt": expanded_prompt,
-            "references": references,
-            "subtasks": subtasks,
-            "tools": tools,
-            "reasoning_process": reasoning,
-            "evaluation_criteria": evaluation_criteria,
-
        }
-
-        assembled_prompt = await self.assemble_prompt(components)

-        if perform_eval:
-            eveluated_prompt = await self.auto_eval(assembled_prompt, evaluation_criteria)
-            advanced_prompt = eveluated_prompt
-        else:
-            advanced_prompt = assembled_prompt
-
-        return {
-            "advanced_prompt": advanced_prompt,
-            "assembled_prompt": assembled_prompt,
-            "components": components,
-            "analysis": analysis,
-        }
-


# Defining the PromptEnhancer class containing the necessary components for the Advanced Prompt Generation Pipeline
class PromptEnhancer:
+    def __init__(self, model="gpt-4o-mini", tools_dict={}):
        self.model = model
        self.prompt_tokens = 0
        self.completion_tokens = 0
        self.tools_dict = tools_dict

+
    async def call_llm(self, prompt):
        """Call the LLM with the given prompt"""
        response = await client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system",
+                 "content":
+                     "You are a highly intelligent AI assistant. Your task is to analyze and comprehend the provided prompt,\
+                     then provide a clear and concise response based strictly on the given instructions.\
+                     Do not include any additional explanations or context beyond the required output."
                },
                {"role": "user",
                 "content": prompt
+                }
            ],
            temperature=0.0,  # from 0 (precise and almost deterministic answer) to 2 (creative and almost random answer)
        )

        return response.choices[0].message.content


+    async def analyze_and_expand_input(self, input_prompt):
+        analysis_and_expansion_prompt = f"""
+        You are a highly intelligent assistant.
+        Analyze the provided {{prompt}} and generate concise answers for the following key aspects:
+
+        - **Main goal of the prompt:** Identify the core subject or request within the provided prompt.
+        - **Persona:** Recommend the most relevant persona for the AI model to adopt (e.g., expert, teacher, conversational, etc.)
+        - **Optimal output length:** Suggest an optimal output length (short, brief, medium, long) based on the task, and give an approximate number of words if it is suitable for the case.
+        - **Most convenient output format:** Recommend the optimal format for the result (e.g., list, paragraph, code snippet, table, JSON, etc.).
+        - **Specific requirements:** Highlight any special conditions, rules, or expectations stated or implied within the prompt.
+        - **Suggested improvements:** Offer recommendations on how to modify or enhance the prompt for more precise or efficient output generation.
+        - **One-shot prompting:** Create one related example to guide the output generation.

+        Then use them to reformulate and expand the provided {{prompt}}.
+        Return the expanded prompt as output in text format. Refrain from explaining the generation process.

+        Example 1:
+        {{prompt}}: "Explain quantum entanglement to a 10-year-old."
+
+        *thought_process*:
+        - **Main goal of the prompt:** Simplify complex quantum physics concept for children.
+        - **Persona:** Patient, friendly teacher
+        - **Optimal output length:** Brief (100-150 words)
+        - **Most convenient output format:** Narrative with analogy
+        - **Specific requirements:** Age-appropriate explanation (10-year-old).
+        - **Suggested improvements:**
+        - Request specific analogies
+        - Include interactive elements
+        - Add follow-up questions
+        - Suggest visual aids
+        - **One-shot prompting:**
+        Output example:
+        "Imagine you have two special pairs of socks. When you put one sock in your room and the other sock in the kitchen,
+        something magical happens! Whatever happens to one sock instantly affects the other sock.
+        If you turn one sock inside out, the other sock automatically turns inside out too, no matter how far apart they are!"
+
+        *output*:
+        As a friendly science teacher, please explain quantum entanglement to a 10-year-old student using these guidelines:
+
+        Start with a relatable analogy using everyday objects
+        Use simple, clear language avoiding technical terms
+        Include 2-3 interactive examples that demonstrate the concept
+        Add fun facts that will spark curiosity
+        End with simple questions to check understanding
+        Keep the explanation brief (100-150 words)

+        Structure your explanation as:

+        Opening analogy
+        Main explanation with examples
+        Interactive "What if?" scenarios
+        Fun facts about quantum entanglement
+        Check-for-understanding questions

+        Remember to maintain an enthusiastic and encouraging tone throughout the explanation.
+
+        Output example:
+        Imagine you have two special pairs of socks. When you put one sock in your room and the other sock in the kitchen,
+        something magical happens! Whatever happens to one sock instantly affects the other sock.
+        If you turn one sock inside out, the other sock automatically turns inside out too, no matter how far apart they are!

+        Example 2:
+        {{prompt}}: "Write a function to calculate the Fibonacci sequence up to n terms."

+        *thought_process*:
+        - **Main goal of the prompt:** Create a programming function that generates Fibonacci numbers
+        - **Persona:** Programming expert
+        - **Optimal output length:** Medium (150-200 words including code)
+        - **Most convenient output format:** Code snippet with explanatory comments
+        - **Specific requirements:** Function must accept parameter n for sequence length
+        - **Suggested improvements:**
+        - Specify programming language
+        - Clarify if 0 should be included as first term
+        - Define expected handling of negative inputs
+        - **One-shot prompting:**
+
+        *output*:
+        As an expert programmer, please create a well-documented function to generate the Fibonacci sequence.
+
+        Requirements:
+        Accept a parameter 'n' specifying the number of terms to generate
+        Handle edge cases (n <= 0, n == 1)
+        Return the sequence as a list/array
+        Include proper error handling
+        Add comments explaining the logic
+
+        Provide the implementation in Python, including:
+        Function definition with docstring
+        Input validation
+        Core algorithm
+        Example usage with outputs for n=5, n=1, and n=0
+
+        For reference, the sequence should start with [0, 1, ...] where each subsequent number is the sum of the previous two numbers.
+
+
+        Now, analyze the following prompt, then return only the generated *output*:
+        {{prompt}}: {input_prompt}
        """
+
+        return await self.call_llm(analysis_and_expansion_prompt)
+

+    async def decompose_and_add_reasoning(self, expanded_prompt):
+        decomposition_and_reasoning_prompt = f"""
+        You are a highly capable AI assistant tasked with improving complex task execution.
+        Analyze the provided {{prompt}}, and use it to generate the following output:
+
+        - **Subtasks decomposition:** Break down the task described in the prompt into manageable and specific subtasks that the AI model needs to address.
+        - **Chain-of-thought reasoning:** For subtasks that involve critical thinking or complex steps, add reasoning using a step-by-step approach to improve decision-making and output quality.
+        - **Success criteria:** Define what constitutes a successful completion for each subtask, ensuring clear guidance for expected results.
+
+        Return the following structured output for each subtask:
+
+        1. **Subtask description**: Describe a specific subtask.
+        2. **Reasoning**: Provide reasoning or explanation for why this subtask is essential or how it should be approached.
+        3. **Success criteria**: Define what successful completion looks like for this subtask.
+
+        Example 1:
+        {{Prompt}}: "Explain how machine learning models are evaluated using cross-validation."
+
+        ##THOUGHT PROCESS##
+        *Subtask 1*:
+        - **Description**: Define cross-validation and its purpose.
+        - **Reasoning**: Clarifying the concept ensures the reader understands the basic mechanism behind model evaluation.
+        - **Success criteria**: The explanation should include a clear definition of cross-validation and its role in assessing model performance.
+        *Subtask 2*:
+        - **Description**: Describe how cross-validation splits data into training and validation sets.
+        - **Reasoning**: Explaining the split is crucial to understanding how models are validated and tested for generalization.
+        - **Success criteria**: A proper explanation of k-fold cross-validation with an illustration of how data is split.
+        *Subtask 3*:
+        - **Description**: Discuss how cross-validation results are averaged to provide a final evaluation metric.
+        - **Reasoning**: Averaging results helps mitigate the variance in performance due to different training/validation splits.
+        - **Success criteria**: The output should clearly explain how the final model evaluation is derived from multiple iterations of cross-validation.

+        Example 2:
+        {{Prompt}}: "Write a function to calculate the factorial of a number."
+
+        ##THOUGHT PROCESS##
+        *Subtask 1*:
+        - **Description**: Define what a factorial is.
+        - **Reasoning**: Starting with a definition ensures the user understands the mathematical operation required.
+        - **Success criteria**: Provide a concise definition with an example (e.g., 5! = 5 x 4 x 3 x 2 x 1 = 120).
+        *Subtask 2*:
+        - **Description**: Write the base case for the factorial function.
+        - **Reasoning**: In recursive programming, defining a base case is essential to avoid infinite recursion.
+        - **Success criteria**: Include a clear base case, such as `n = 1`, to ensure termination of recursion.
+        *Subtask 3*:
+        - **Description**: Implement the recursive step for the factorial function.
+        - **Reasoning**: The recursive case should reflect the mathematical definition of factorial.
+        - **Success criteria**: The function should return `n * factorial(n-1)` for positive integers.

+        Example 3:
+        {{Prompt}}: "Explain the process of photosynthesis in plants."
+
+        ##THOUGHT PROCESS##
+        *Subtask 1*:
+        - **Description**: Define photosynthesis and its overall purpose in plants.
+        - **Reasoning**: Starting with a definition provides context and sets the stage for a detailed explanation.
+        - **Success criteria**: Clear and concise definition of photosynthesis, mentioning its role in converting sunlight into chemical energy.
+        *Subtask 2*:
+        - **Description**: Break down the steps involved in the photosynthesis process (e.g., light-dependent and light-independent reactions).
+        - **Reasoning**: Understanding the individual steps helps to grasp the complexity of how plants convert light into usable energy.
+        - **Success criteria**: Explain both the light-dependent reactions (e.g., capturing light energy) and the Calvin cycle (sugar formation).
+        *Subtask 3*:
+        - **Description**: Discuss the importance of photosynthesis to the ecosystem and human life.
+        - **Reasoning**: Highlighting the broader implications reinforces the significance of this process beyond the biological aspect.
+        - **Success criteria**: Provide examples of how photosynthesis contributes to oxygen production and energy flow in ecosystems.
+
+        Example 4:
+        {{Prompt}}: "Design a user-friendly login interface for a mobile app."
+
+        ##THOUGHT PROCESS##
+        *Subtask 1*:
+        - **Description**: Identify key user interface elements (e.g., username field, password field, login button).
+        - **Reasoning**: Identifying these core elements ensures the interface includes the necessary components for functionality.
+        - **Success criteria**: The interface should include a username input, password input, and a clearly labeled login button.
+        *Subtask 2*:
+        - **Description**: Focus on the user experience, ensuring simplicity and intuitive navigation.
+        - **Reasoning**: An intuitive design ensures a seamless user experience, reducing friction for users during the login process.
+        - **Success criteria**: The layout should be minimalistic with clear labels, making the login process simple and quick.
+        *Subtask 3*:
+        - **Description**: Implement security features like password masking and error handling for incorrect logins.
+        - **Reasoning**: Security measures ensure that user data is protected and help guide users when errors occur.
+        - **Success criteria**: Passwords should be masked by default, and error messages should be informative but secure (e.g., "Incorrect username or password").
+
+        Example 5:
+        {{Prompt}}: "Outline the steps to bake a chocolate cake from scratch."
+
+        ##THOUGHT PROCESS##
+        *Subtask 1*:
+        - **Description**: List all the ingredients required for the cake.
+        - **Reasoning**: Starting with ingredients ensures all necessary components are prepared before beginning the process.
+        - **Success criteria**: Provide a complete list of ingredients, including measurements (e.g., 2 cups of flour, 1 cup of sugar, etc.).
+        *Subtask 2*:
+        - **Description**: Describe the preparation steps, such as mixing dry and wet ingredients.
+        - **Reasoning**: Detailing the preparation steps ensures that the user follows the correct sequence for combining ingredients.
+        - **Success criteria**: Instructions should specify when and how to mix ingredients to achieve the right consistency.
+        *Subtask 3*:
+        - **Description**: Explain the baking time and temperature.
+        - **Reasoning**: Providing accurate baking instructions is crucial for the cake to cook properly.
+        - **Success criteria**: Specify an appropriate baking temperature (e.g., 350°F) and time (e.g., 25-30 minutes), along with how to check for doneness.
+
+        Example 6:
+        {{Prompt}}: "Create a marketing plan for a new eco-friendly product."
+
+        ##THOUGHT PROCESS##
+        *Subtask 1*:
+        - **Description**: Identify the target audience for the eco-friendly product.
+        - **Reasoning**: Defining the target audience is essential for tailoring the marketing message and strategy effectively.
+        - **Success criteria**: Provide a detailed description of the ideal customer demographics and psychographics (e.g., age, values, eco-consciousness).
+        *Subtask 2*:
+        - **Description**: Outline the key messaging and brand positioning.
+        - **Reasoning**: Clear messaging ensures the product’s benefits and unique selling points are communicated effectively to the target audience.
+        - **Success criteria**: Develop a compelling message that highlights the eco-friendliness, sustainability, and benefits of the product.
+        *Subtask 3*:
+        - **Description**: Define the marketing channels to be used (e.g., social media, email campaigns, influencer partnerships).
+        - **Reasoning**: Selecting the appropriate channels ensures that the marketing plan reaches the right audience in an impactful way.
+        - **Success criteria**: Choose a mix of channels based on the target audience’s preferences and behaviors, including both digital and traditional media.
+
+
+        Now, analyze the following expanded prompt and return the subtasks, reasoning, and success criteria.
+        Prompt: {expanded_prompt}
        """
+        return await self.call_llm(decomposition_and_reasoning_prompt)
+

+
+    async def suggest_enhancements(self, input_prompt, tools_dict={}):
+        enhancement_suggestion_prompt = f"""
+        You are a highly intelligent assistant specialized in reference suggestion and tool integration.
+        Analyze the provided {{input_prompt}} and the available {{tools_dict}} to recommend enhancements:

+        - **Reference necessity:** Determine if additional reference materials would benefit the task execution (e.g., websites, documentations, books, articles, etc.)
+        - **Tool applicability:** Evaluate if any available tools could enhance efficiency or accuracy
+        - **Integration complexity:** Assess the effort required to incorporate suggested resources
+        - **Expected impact:** Estimate the potential improvement in output quality
+
+        If enhancements are warranted, provide structured recommendations in this format:
+
+        ##REFERENCE SUGGESTIONS##
+        (Only if applicable, maximum 3)
+        - Reference name/type
+        - Purpose: How it enhances the output
+        - Integration: How to incorporate it
+
+        ##TOOL SUGGESTIONS##
+        (Only if applicable, maximum 3)
+        - Tool name from tools_dict
+        - Purpose: How it improves the task
+        - Integration: How to implement it
+
+        If no enhancements would significantly improve the output, return an empty string ""

+        Example 1:
+        {{input_prompt}}: "Write a Python function to detect faces in images using computer vision."
+        {{tools_dict}}: {{}}
+        *output*:
+        ##REFERENCE SUGGESTIONS##
+        - OpenCV Face Detection Documentation
+        Purpose: Provides implementation details and best practices
+        Integration: Reference for optimal parameter settings and cascade classifier usage
+
+        Example 2:
+        {{input_prompt}}: "Write a haiku about spring."
+        {{tools_dict}}: {{"textblob": "Text processing library", "gpt": "Language model"}}
+        *output*:
+
+
+        Example 3:
+        {{input_prompt}}: "Create a sentiment analysis function for customer reviews."
+        {{tools_dict}}: {{}}
+        *output*:
+        ##REFERENCE SUGGESTIONS##
+        - VADER Sentiment Analysis Paper
+        Purpose: Provides insights into social media text sentiment analysis
+        Integration: Reference for understanding compound sentiment scoring
+
+        Example 4:
+        {{input_prompt}}: "Generate a weather forecast report for New York."
+        {{tools_dict}}: {{"requests": "HTTP library", "json": "JSON parser", "weather_api": "Weather data service"}}
+        *output*:
+        ##TOOL SUGGESTIONS##
+        - weather_api
+        Purpose: Provides real-time weather data
+        Integration: Use API endpoints for forecast data retrieval
+        - requests
+        Purpose: Make HTTP requests to weather API
+        Integration: Use requests.get() to fetch weather data
+
+        Example 5:
+        {{input_prompt}}: "Calculate the factorial of a number."
+        {{tools_dict}}: {{}}
+        *output*:

+        Example 6:
+        {{input_prompt}}: "Create an API endpoint documentation."
+        {{tools_dict}}: {{"swagger": "API documentation tool", "markdown": "Text formatting", "json_schema": "JSON schema validator"}}
+        *output*:
+        ##REFERENCE SUGGESTIONS##
+        - OpenAPI Specification
+        Purpose: Provides standard API documentation format
+        Integration: Use as template for documentation structure
+        - REST API Best Practices
+        Purpose: Ensures documentation follows industry standards
+        Integration: Reference for endpoint description patterns
+
+        ##TOOL SUGGESTIONS##
+        - swagger
+        Purpose: Generate interactive API documentation
+        Integration: Use Swagger UI for visual documentation
+        - json_schema
+        Purpose: Validate API request/response schemas
+        Integration: Define and validate data structures
+
+        Example 7:
+        {{input_prompt}}: "Create an API endpoint documentation."
+        {{tools_dict}}: {{}}
+        *output*:
+        ##REFERENCE SUGGESTIONS##
+        - OpenAPI Specification
+        Purpose: Provides standard API documentation format
+        Integration: Use as template for documentation structure
+        - REST API Best Practices
+        Purpose: Ensures documentation follows industry standards
+        Integration: Reference for endpoint description patterns
+
+
+        Now, analyze the following prompt and tools, then return only the generated *output*:
+        {{input_prompt}}: {input_prompt}
+        {{tools_dict}}: {tools_dict}
        """
+        return await self.call_llm(enhancement_suggestion_prompt)

+
+    async def assemble_prompt(self, components):
+        expanded_prompt = components.get("expanded_prompt", "")
+        decomposition_and_reasoning = components.get("decomposition_and_reasoning", "")
+        suggested_enhancements = components.get("suggested_enhancements", "")
+
+        output_prompt = (
+            f"{expanded_prompt}\n\n"
+            f"{suggested_enhancements}\n\n"
+            f"{decomposition_and_reasoning}"
        )
+        return output_prompt
+
+
+    async def enhance_prompt(self, input_prompt):
+
+        # TODO: Add a function to update the tools_dict
+        # TODO: Add function calling method
+
+        tools_dict = {}
+
+        expanded_prompt = await self.analyze_and_expand_input(input_prompt)
+        suggested_enhancements = await self.suggest_enhancements(input_prompt, tools_dict)
+        decomposition_and_reasoning = await self.decompose_and_add_reasoning(expanded_prompt)
+
        components = {
+            "expanded_prompt": expanded_prompt,
+            "decomposition_and_reasoning": decomposition_and_reasoning,
+            "suggested_enhancements": suggested_enhancements
        }

+        output_prompt = await self.assemble_prompt(components)
+
+        return output_prompt
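
After this commit, enhance_prompt returns the assembled prompt string directly instead of the previous result dictionary. The following is a minimal usage sketch, not part of the commit; it assumes pipeline.py is importable as "pipeline" and that the module-level AsyncOpenAI client defined earlier in the file is configured with a valid API key.

    # Minimal usage sketch (not part of this commit). Assumes pipeline.py is
    # importable as "pipeline" and the AsyncOpenAI client has a valid API key.
    import asyncio

    from pipeline import PromptEnhancer

    async def main():
        enhancer = PromptEnhancer(model="gpt-4o-mini")
        # enhance_prompt is async and now returns the assembled prompt as a string
        advanced_prompt = await enhancer.enhance_prompt(
            "Write a function to calculate the Fibonacci sequence up to n terms."
        )
        print(advanced_prompt)

    asyncio.run(main())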