# r-func / app.py
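"""Gradio app that generates Python functions with LLMs (via litellm)
and tests them by executing the generated code through Open Interpreter."""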
import os
import random

import gradio as gr
import litellm
from interpreter import interpreter
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_fixed

# Status messages printed while the pipeline runs
comments = [
    "Generating function... 🚀",
    "Testing function... 🧪",
    "Oops, something went wrong! 😅",
    "Function passed the test! 🎉",
    "Getting everything together... 💪",
    "Debugging in progress... 🐛",
    "Unleashing the power of LLMs! 🧠",
    "Crafting the perfect function... 🛠️",
]

# Running record of generated functions, fed back into the guidance prompt
conversation_history = []

@retry(
    stop=stop_after_attempt(3),
    wait=wait_fixed(2),
    retry=retry_if_exception_type(litellm.exceptions.AuthenticationError),
)
def get_llm_response(prompt, model="gpt-4-turbo-preview"):
    """Send a single-message prompt to the given model and return its reply."""
    print(random.choice(comments))
    try:
        response = litellm.completion(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7,
        )
        return response.choices[0].message.content
    except litellm.exceptions.AuthenticationError as e:
        print(f"Authentication Error: {str(e)}")
        raise

def test_function(function_code):
    """Execute the generated code via Open Interpreter; return (success, error)."""
    try:
        print("Executing the generated function... 🏃")
        interpreter.auto_run = True
        output = interpreter.chat(function_code)
        print(f"Function output: {output}")
        print("Function passed the test! ✅")
        return True, None
    except Exception as e:
        print(f"Error occurred: {str(e)} ❌")
        return False, str(e)

def generate_and_test_function(prompt, previous_code=None, previous_error=None, iteration=1):
    """Generate a function for the prompt, test it, and return (success, error, code)."""
    print(f"Generating function for prompt (Iteration {iteration}): {prompt}")
    # Append previous code and error to the prompt for context
    if previous_code and previous_error:
        prompt += f"\nPrevious code:\n{previous_code}\n\nPrevious error:\n{previous_error}\n\n"
        prompt += "Please analyze the previous code and error, and provide suggestions and insights to fix the issue."
    # Use GPT-3.5 for internal guidance
    guidance_prompt = (
        f"Provide guidance and suggestions for generating a function based on the "
        f"following prompt and conversation history:\n{prompt}\n\n"
        f"Conversation History:\n{conversation_history}"
    )
    guidance_response = get_llm_response(guidance_prompt, model="gpt-3.5-turbo")
    # Use GPT-4 to generate the final function code
    generation_prompt = f"""
{prompt}
Guidance from super intelligent code bot:
{guidance_response}
Please generate a Python function that satisfies the prompt and follows the provided guidance, while adhering to these coding standards:
- Use descriptive and meaningful names for variables, functions, and classes.
- Follow the naming conventions: lowercase with underscores for functions and variables, CamelCase for classes.
- Keep functions small and focused, doing one thing well.
- Use 4 spaces for indentation, and avoid mixing spaces and tabs.
- Limit line length to 79 characters for better readability.
- Use docstrings to document functions, classes, and modules, describing their purpose, parameters, and return values.
- Use comments sparingly, and prefer descriptive names and clear code structure over comments.
- Handle exceptions appropriately and raise exceptions with clear error messages.
- Use blank lines to separate logical sections of code, but avoid excessive blank lines.
- Import modules in a specific order: standard library, third-party, and local imports, separated by blank lines.
- Use consistent quotes (single or double) for strings throughout the codebase.
- Follow the PEP 8 style guide for more detailed coding standards and best practices.
"""
    generated_function = get_llm_response(generation_prompt, model="gpt-4")
    print("Testing the generated function...")
    success, error = test_function(generated_function)
    # Append the generated function to the conversation history
    conversation_history.append({"role": "assistant", "content": generated_function})
    return success, error, generated_function

def save_function_to_file(generated_function, file_name):
    """Write the generated function to disk."""
    with open(file_name, "w") as file:
        file.write(generated_function)
    print(f"Function saved to {file_name}")

# Example adjustment for the option handling part
def handle_post_success_actions(generated_function):
    """Prompt for a follow-up action; return the (possibly updated) function code."""
    # NOTE: input() only works when the script runs in a terminal, not from
    # inside the Gradio web UI; this helper is for console-driven use.
    valid_option = False
    while not valid_option:
        print("\nOptions:")
        # Options list here
        option = input("Enter your choice (1-3): ")
        if option in ["1", "2", "3"]:
            valid_option = True
            # Handle each option here
        else:
            print("Invalid choice. Please try again.")
    return generated_function  # callers expect the function code back, not None

def main(initial_prompt, run_mode, num_runs, console_output, command_input):
    console_output = "Enter the initial prompt for the development process: " + initial_prompt + "\n"
    yield console_output, gr.update(value="")  # Clear the command input
    console_output += "\nMenu:\n1. Generate and test a function 🎨\n2. Exit 👋\n"
    # A generator cannot receive fresh input while it is running, so the
    # command submitted alongside the prompt is read once as the menu choice
    # (defaulting to "1" when the box is empty).
    choice = command_input.strip() or "1"
    if choice == "1":
        if run_mode == "1":
            success, error, generated_function = generate_and_test_function(initial_prompt)
            if success:
                generated_function = handle_post_success_actions(generated_function)
                initial_prompt = f"Continue developing the function:\n{generated_function}"
            else:
                console_output += "Function test failed. 😞\n"
            yield console_output, gr.update(interactive=True)
        elif run_mode == "2":
            for i in range(int(num_runs)):
                console_output += f"\nRun {i + 1}:\n"
                yield console_output, gr.update(interactive=True)
                success, error, generated_function = generate_and_test_function(initial_prompt)
                if success:
                    generated_function = handle_post_success_actions(generated_function)
                    initial_prompt = f"Continue developing the function:\n{generated_function}"
                else:
                    console_output += "Function test failed. 😞\n"
                yield console_output, gr.update(interactive=True)
        elif run_mode == "3":
            while True:
                success, error, generated_function = generate_and_test_function(initial_prompt)
                if success:
                    generated_function = handle_post_success_actions(generated_function)
                    initial_prompt = f"Continue developing the function:\n{generated_function}"
                else:
                    console_output += "Function test failed. Retrying...\n"
                yield console_output, gr.update(interactive=True)
    elif choice == "2":
        console_output += "Exiting... Goodbye! 👋\n"
        yield console_output, gr.update(interactive=False)
    else:
        console_output += "Invalid choice. Please try again. 😅\n"
        yield console_output, gr.update(interactive=True)

with gr.Blocks() as demo:
    gr.Markdown("# LLM-Powered Function Generator")
    with gr.Row():
        with gr.Column():
            initial_prompt = gr.Textbox(label="Initial Prompt")
            run_mode = gr.Radio(["1", "2", "3"], label="Run Mode", info="1: Single Run, 2: Multiple Runs, 3: Continuous Mode")
            num_runs = gr.Number(label="Number of Runs", visible=False, interactive=True)
            start_button = gr.Button("Start")
        with gr.Column():
            console_output = gr.Textbox(label="Console Output", lines=20)
            command_input = gr.Textbox(label="Command Input", lines=1)

    # Only show the run-count field for multiple-run mode
    run_mode.change(lambda x: gr.update(visible=x == "2"), inputs=run_mode, outputs=num_runs)
    # main yields (console text, command-box update) pairs, so both
    # components must be wired as outputs
    start_button.click(
        main,
        inputs=[initial_prompt, run_mode, num_runs, console_output, command_input],
        outputs=[console_output, command_input],
    )
    command_input.submit(
        main,
        inputs=[initial_prompt, run_mode, num_runs, console_output, command_input],
        outputs=[console_output, command_input],
    )

demo.queue().launch()
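
# To run locally, litellm and Open Interpreter read the OpenAI key from the
# environment (an assumption based on the OpenAI models used above):
#   export OPENAI_API_KEY=sk-...   # placeholder value
#   python app.py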