ruv committed on
Commit
813c713
β€’
1 Parent(s): d6a0e48

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +181 -0
app.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import litellm
3
+ import os
4
+ import random
5
+ from tenacity import retry, stop_after_attempt, wait_fixed, retry_if_exception_type
6
+ from interpreter import interpreter
7
+
8
# Rotating status lines printed to the console while an LLM request is
# in flight (picked at random by get_llm_response).
comments = [
    "Generating function... 🚀",
    "Testing function... 🧪",
    "Oops, something went wrong! 😅",
    "Function passed the test! 🎉",
    "Getting everything together... 💪",
    "Debugging in progress... 🐛",
    "Unleashing the power of LLMs! 🧠",
    "Crafting the perfect function... 🛠️",
]

# Running transcript shared across generations; each generated function is
# appended as an {"role": "assistant", ...} message and fed back into the
# guidance prompt on subsequent calls.
conversation_history = []
20
+
21
@retry(stop=stop_after_attempt(3), wait=wait_fixed(2), retry=retry_if_exception_type(litellm.exceptions.AuthenticationError))
def get_llm_response(prompt, model="gpt-4-turbo-preview"):
    """Send *prompt* to the given model via litellm and return the reply text.

    Retried up to 3 times, 2 seconds apart, when litellm raises an
    AuthenticationError (tenacity sees the re-raised exception).

    Args:
        prompt: User message to send to the model.
        model: litellm model identifier; defaults to "gpt-4-turbo-preview".

    Returns:
        The content string of the first completion choice.

    Raises:
        litellm.exceptions.AuthenticationError: logged, then re-raised for
            the retry decorator.
    """
    # Fun status line for the console while the request is in flight.
    print(random.choice(comments))
    try:
        response = litellm.completion(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7
        )
        return response.choices[0].message.content
    except litellm.exceptions.AuthenticationError as e:
        print(f"Authentication Error: {str(e)}")
        # Bare `raise` re-raises with the original traceback intact
        # (`raise e` would prepend this handler frame); tenacity still
        # matches on the exception type.
        raise
34
+
35
def test_function(function_code):
    """Execute *function_code* with Open Interpreter and report the outcome.

    Returns:
        (True, None) when execution completes without raising, otherwise
        (False, str(exception)).
    """
    try:
        print("Executing the generated function... 🏃")
        # Run generated code blocks without asking for confirmation.
        interpreter.auto_run = True
        result = interpreter.chat(function_code)
        print(f"Function output: {result}")
        print("Function passed the test! ✅")
    except Exception as exc:
        print(f"Error occurred: {str(exc)} ❌")
        return False, str(exc)
    return True, None
46
+
47
def generate_and_test_function(prompt, previous_code=None, previous_error=None, iteration=1):
    """Generate a Python function for *prompt* with LLMs, then test it.

    Pipeline: GPT-3.5 produces guidance, GPT-4 produces the function from
    prompt + guidance, and test_function() executes the result with Open
    Interpreter. The generated function is appended to the module-level
    conversation_history.

    Args:
        prompt: Description of the function to generate.
        previous_code: Code from a prior failed attempt, if any.
        previous_error: Error text from that prior attempt, if any.
        iteration: 1-based attempt counter (used only in the log line).

    Returns:
        (success, error, generated_function) — error is None on success.
    """
    print(f"Generating function for prompt (Iteration {iteration}): {prompt}")

    # Append previous code and error to the prompt for context.
    # NOTE(review): only applied when BOTH are present — a previous_code
    # without an error (as handle_post_success_actions passes) is ignored.
    if previous_code and previous_error:
        prompt += f"\nPrevious code:\n{previous_code}\n\nPrevious error:\n{previous_error}\n\n"
        prompt += "Please analyze the previous code and error, and provide suggestions and insights to fix the issue."

    # Use GPT-3.5 for internal guidance
    guidance_prompt = f"Provide guidance and suggestions for generating a function based on the following prompt and conversation history:\n{prompt}\n\nConversation History:\n{conversation_history}"
    guidance_response = get_llm_response(guidance_prompt, model="gpt-3.5-turbo")

    # Use GPT-4 for final guidance to Open Interpreter
    generation_prompt = f"""
    {prompt}

    Guidance from super intelligent code bot:
    {guidance_response}

    Please generate a Python function that satisfies the prompt and follows the provided guidance, while adhering to these coding standards:
    - Use descriptive and meaningful names for variables, functions, and classes.
    - Follow the naming conventions: lowercase with underscores for functions and variables, CamelCase for classes.
    - Keep functions small and focused, doing one thing well.
    - Use 4 spaces for indentation, and avoid mixing spaces and tabs.
    - Limit line length to 79 characters for better readability.
    - Use docstrings to document functions, classes, and modules, describing their purpose, parameters, and return values.
    - Use comments sparingly, and prefer descriptive names and clear code structure over comments.
    - Handle exceptions appropriately and raise exceptions with clear error messages.
    - Use blank lines to separate logical sections of code, but avoid excessive blank lines.
    - Import modules in a specific order: standard library, third-party, and local imports, separated by blank lines.
    - Use consistent quotes (single or double) for strings throughout the codebase.
    - Follow the PEP 8 style guide for more detailed coding standards and best practices.
    """
    generated_function = get_llm_response(generation_prompt, model="gpt-4")

    print("Testing the generated function...")
    success, error = test_function(generated_function)

    # Append the generated function to the conversation history
    conversation_history.append({"role": "assistant", "content": generated_function})

    return success, error, generated_function
89
+
90
def save_function_to_file(generated_function, file_name):
    """Write *generated_function* source text to *file_name*.

    Args:
        generated_function: The function source code to persist.
        file_name: Destination path, e.g. "hello_world.py".
    """
    # Explicit UTF-8: generated code may contain emoji/non-ASCII, which
    # would raise UnicodeEncodeError under a non-UTF-8 locale encoding
    # (e.g. cp1252 on Windows) if left to the platform default.
    with open(file_name, "w", encoding="utf-8") as file:
        file.write(generated_function)
    print(f"Function saved to {file_name}")
94
+
95
def handle_post_success_actions(generated_function):
    """Interactive console menu shown after a successful generation.

    Lets the user iterate on the function, save it to disk, or return to
    the main menu. Returns the (possibly modified) function source.
    """
    while True:
        print("\nOptions:")
        print("1. Modify the function further")
        print("2. Save the function to a file")
        print("3. Return to main menu")
        option = input("Enter your choice (1-3): ")
        if option == "3":
            # Done iterating — hand the current version back to the caller.
            return generated_function
        if option == "1":
            modification_prompt = input("Enter the modification prompt: ")
            ok, _error, candidate = generate_and_test_function(modification_prompt, generated_function)
            if ok:
                generated_function = candidate
            else:
                print("Modification failed. Keeping the original function.")
        elif option == "2":
            target = input("Enter the file name to save the function (e.g., hello_world.py): ")
            save_function_to_file(generated_function, target)
        # Any other input silently re-displays the menu.
114
+
115
def main(initial_prompt, run_mode, num_runs, console_output, command_input):
    """Gradio event handler: stream console text while generating functions.

    Args:
        initial_prompt: Prompt describing the function to generate.
        run_mode: "1" single run, "2" fixed number of runs, "3" continuous.
        num_runs: Run count used when run_mode == "2".
        console_output: Previous console text (ignored; a fresh log is built).
        command_input: Menu command entered by the user ("1" or "2").

    Yields:
        The accumulated console text after each step.

    Fix: the original looped `while True` re-reading `choice` after blanking
    `command_input`, so every invocation ended in an endless stream of
    "Invalid choice" yields. The command is now handled exactly once.
    """
    console_output = ""  # start a fresh log for this invocation
    console_output += "\nMenu:\n1. Generate and test a function 🎨\n2. Exit 👋\n"
    choice = command_input

    if choice == "1":
        if run_mode == "1":
            success, error, generated_function = generate_and_test_function(initial_prompt)
            if success:
                # NOTE(review): handle_post_success_actions reads console
                # input(), which blocks the Gradio worker — kept for parity
                # with the original design; confirm intended deployment.
                generated_function = handle_post_success_actions(generated_function)
            else:
                console_output += "Function test failed. 😞\n"
            yield console_output
        elif run_mode == "2":
            for i in range(int(num_runs)):
                console_output += f"\nRun {i+1}:\n"
                yield console_output
                success, error, generated_function = generate_and_test_function(initial_prompt)
                if success:
                    generated_function = handle_post_success_actions(generated_function)
                    # Feed the accepted function back in as the next prompt.
                    initial_prompt = f"Continue developing the function:\n{generated_function}"
                else:
                    console_output += "Function test failed. 😞\n"
                    yield console_output
        elif run_mode == "3":
            # Continuous mode: iterate forever, retrying failures and
            # evolving the prompt after each success.
            while True:
                success, error, generated_function = generate_and_test_function(initial_prompt)
                if success:
                    generated_function = handle_post_success_actions(generated_function)
                    initial_prompt = f"Continue developing the function:\n{generated_function}"
                else:
                    console_output += "Function test failed. Retrying...\n"
                    yield console_output
    elif choice == "2":
        console_output += "Exiting... Goodbye! 👋\n"
        yield console_output
    else:
        console_output += "Invalid choice. Please try again. 😅\n"
        yield console_output

    yield console_output
161
+
162
# --- Gradio UI wiring ---------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# LLM-Powered Function Generator")

    with gr.Row():
        with gr.Column():
            # Left column: inputs that configure a generation run.
            initial_prompt = gr.Textbox(label="Initial Prompt")
            run_mode = gr.Radio(["1", "2", "3"], label="Run Mode", info="1: Single Run, 2: Multiple Runs, 3: Continuous Mode")
            # Hidden until run mode "2" (Multiple Runs) is selected below.
            num_runs = gr.Number(label="Number of Runs", visible=False, interactive=True)
            start_button = gr.Button("Start")

        with gr.Column():
            # Right column: streamed log plus a box for menu commands.
            console_output = gr.Textbox(label="Console Output", lines=20)
            command_input = gr.Textbox(label="Command Input", lines=1)

    # Toggle visibility of the run-count field based on the selected mode.
    run_mode.change(lambda x: gr.update(visible=x=="2"), inputs=run_mode, outputs=num_runs)

    # Both the Start button and Enter in the command box invoke main(),
    # whose yielded strings stream into console_output.
    start_button.click(main, inputs=[initial_prompt, run_mode, num_runs, console_output, command_input], outputs=console_output)
    command_input.submit(main, inputs=[initial_prompt, run_mode, num_runs, console_output, command_input], outputs=console_output)

# queue() enables generator streaming from main(); launch() starts the app.
demo.queue().launch()