|
import gradio as gr |
|
import litellm |
|
import os |
|
import random |
|
from tenacity import retry, stop_after_attempt, wait_fixed, retry_if_exception_type |
|
from interpreter import interpreter |
|
|
|
# Fun status messages printed while the generator works.
# NOTE(review): the emoji are mojibake (UTF-8 decoded with the wrong codec)
# in the original source; preserved as-is except for the third entry, whose
# literal was split by a raw newline (a SyntaxError) and has been re-joined.
comments = [
    "Generating function... π",
    "Testing function... π§ͺ",
    "Oops, something went wrong! π",
    "Function passed the test! π",
    "Getting everything together... πͺ",
    "Debugging in progress... π",
    "Unleashing the power of LLMs! π§ ",
    "Crafting the perfect function... π οΈ",
]
|
|
|
# Module-level chat history shared across calls; generate_and_test_function
# appends assistant messages here so later prompts can include prior context.
conversation_history = []
|
|
|
@retry(stop=stop_after_attempt(3), wait=wait_fixed(2), retry=retry_if_exception_type(litellm.exceptions.AuthenticationError))
def get_llm_response(prompt, model="gpt-4-turbo-preview"):
    """Send a single-message chat completion and return the reply text.

    Parameters:
        prompt: user message content.
        model: LiteLLM model identifier (default "gpt-4-turbo-preview").

    Returns:
        The assistant message content from the first choice.

    Raises:
        litellm.exceptions.AuthenticationError: logged and re-raised;
            tenacity retries it up to 3 times with a 2-second fixed wait.
            NOTE(review): auth errors are rarely transient — retrying a
            rate-limit or connection error would make more sense; confirm
            the intended retry condition.
    """
    # Show a fun progress message on each attempt.
    print(random.choice(comments))
    try:
        response = litellm.completion(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7,
        )
        return response.choices[0].message.content
    except litellm.exceptions.AuthenticationError as e:
        print(f"Authentication Error: {str(e)}")
        # Bare `raise` re-raises with the original traceback intact
        # (previously `raise e`, which adds a redundant frame).
        raise
|
|
|
def test_function(function_code):
    """Execute generated source via open-interpreter and report success.

    Parameters:
        function_code: source text to run.

    Returns:
        (True, None) when execution raised nothing, otherwise
        (False, error_message). Note that "passed the test" only means
        "did not raise" — the output itself is not asserted against.
    """
    try:
        print("Executing the generated function... π")
        # auto_run skips open-interpreter's interactive confirmation prompts.
        interpreter.auto_run = True
        output = interpreter.chat(function_code)
        print(f"Function output: {output}")
        # Original literal was split by a raw newline (SyntaxError); re-joined.
        print("Function passed the test! β")
        return True, None
    except Exception as e:
        print(f"Error occurred: {str(e)} β")
        return False, str(e)
|
|
|
def generate_and_test_function(prompt, previous_code=None, previous_error=None, iteration=1):
    """Generate a function with a two-model pipeline and execute it once.

    Flow: (1) if a previous attempt failed, fold its code and error back into
    the prompt; (2) ask gpt-3.5-turbo for guidance; (3) ask gpt-4 to generate
    the function following that guidance; (4) run it via test_function; (5)
    append the generated code to the module-level conversation_history.

    Parameters:
        prompt: natural-language description of the desired function.
        previous_code: source of a prior failed attempt, if any.
        previous_error: error message from that attempt, if any.
        iteration: attempt counter (used only in the progress message).

    Returns:
        (success, error, generated_function) where success/error come from
        test_function and generated_function is the raw model output.
    """
    print(f"Generating function for prompt (Iteration {iteration}): {prompt}")

    # Feed a failed attempt back into the prompt so the model can self-correct.
    if previous_code and previous_error:
        prompt += f"\nPrevious code:\n{previous_code}\n\nPrevious error:\n{previous_error}\n\n"
        prompt += "Please analyze the previous code and error, and provide suggestions and insights to fix the issue."

    # Stage 1: a cheaper model drafts guidance, seeded with the running
    # history (the list of dicts is interpolated verbatim into the prompt).
    guidance_prompt = f"Provide guidance and suggestions for generating a function based on the following prompt and conversation history:\n{prompt}\n\nConversation History:\n{conversation_history}"
    guidance_response = get_llm_response(guidance_prompt, model="gpt-3.5-turbo")

    # Stage 2: the stronger model writes the function under coding standards.
    generation_prompt = f"""
{prompt}

Guidance from super intelligent code bot:
{guidance_response}

Please generate a Python function that satisfies the prompt and follows the provided guidance, while adhering to these coding standards:
- Use descriptive and meaningful names for variables, functions, and classes.
- Follow the naming conventions: lowercase with underscores for functions and variables, CamelCase for classes.
- Keep functions small and focused, doing one thing well.
- Use 4 spaces for indentation, and avoid mixing spaces and tabs.
- Limit line length to 79 characters for better readability.
- Use docstrings to document functions, classes, and modules, describing their purpose, parameters, and return values.
- Use comments sparingly, and prefer descriptive names and clear code structure over comments.
- Handle exceptions appropriately and raise exceptions with clear error messages.
- Use blank lines to separate logical sections of code, but avoid excessive blank lines.
- Import modules in a specific order: standard library, third-party, and local imports, separated by blank lines.
- Use consistent quotes (single or double) for strings throughout the codebase.
- Follow the PEP 8 style guide for more detailed coding standards and best practices.
"""
    generated_function = get_llm_response(generation_prompt, model="gpt-4")

    print("Testing the generated function...")
    success, error = test_function(generated_function)

    # Only the assistant's output is recorded; user prompts are not stored.
    conversation_history.append({"role": "assistant", "content": generated_function})

    return success, error, generated_function
|
|
|
def save_function_to_file(generated_function, file_name):
    """Write the generated source text to *file_name*, overwriting it.

    Parameters:
        generated_function: source text to persist.
        file_name: destination path.
    """
    # Explicit UTF-8 so emoji / non-ASCII content round-trips regardless of
    # the platform's default locale encoding (previously unspecified).
    with open(file_name, "w", encoding="utf-8") as file:
        file.write(generated_function)
    print(f"Function saved to {file_name}")
|
|
|
def handle_post_success_actions(generated_function):
    """Interactive (stdin-driven) menu for a successfully generated function.

    Options: 1) iterate on the function with a new prompt, 2) save it to a
    file, 3) return it to the caller. Loops until option 3 is chosen.

    NOTE(review): this uses input()/print(), so it blocks the worker thread
    when driven from the Gradio UI — confirm that is intended.

    Parameters:
        generated_function: source text of the current function.

    Returns:
        The (possibly modified) function source once option 3 is chosen.
    """
    while True:
        print("\nOptions:")
        print("1. Modify the function further")
        print("2. Save the function to a file")
        print("3. Return to main menu")
        option = input("Enter your choice (1-3): ")
        if option == "1":
            modification_prompt = input("Enter the modification prompt: ")
            success, error, modified_function = generate_and_test_function(modification_prompt, generated_function)
            if success:
                generated_function = modified_function
            else:
                print("Modification failed. Keeping the original function.")
        elif option == "2":
            file_name = input("Enter the file name to save the function (e.g., hello_world.py): ")
            save_function_to_file(generated_function, file_name)
        elif option == "3":
            return generated_function
        else:
            # Previously unrecognized input re-looped silently; tell the user.
            print("Invalid choice. Please enter 1, 2, or 3.")
|
|
|
def main(initial_prompt, run_mode, num_runs, console_output, command_input):
    """Drive one generate/test session and stream console text to the UI.

    Generator used as a Gradio event handler: each ``yield`` pushes the full
    accumulated console text to the output textbox.

    Parameters:
        initial_prompt: prompt for the first generation.
        run_mode: "1" single run, "2" multiple runs, "3" continuous retry.
        num_runs: run count for mode "2" (coerced with int()).
        console_output: incoming textbox value (ignored; reset below).
        command_input: the menu command ("1" generate, "2" exit).

    Yields:
        The accumulated console text after each state change.
    """
    console_output = ""  # deliberately ignore the incoming textbox value

    while True:
        console_output += "\nMenu:\n1. Generate and test a function π¨\n2. Exit π\n"
        choice = command_input
        command_input = ""  # the single command from the UI is now consumed

        if choice == "1":
            if run_mode == "1":
                success, error, generated_function = generate_and_test_function(initial_prompt)
                if success:
                    generated_function = handle_post_success_actions(generated_function)
                    initial_prompt = f"Continue developing the function:\n{generated_function}"
                else:
                    console_output += "Function test failed. π\n"
                    yield console_output
            elif run_mode == "2":
                for i in range(int(num_runs)):
                    console_output += f"\nRun {i+1}:\n"
                    yield console_output
                    success, error, generated_function = generate_and_test_function(initial_prompt)
                    if success:
                        generated_function = handle_post_success_actions(generated_function)
                        initial_prompt = f"Continue developing the function:\n{generated_function}"
                    else:
                        console_output += "Function test failed. π\n"
                        yield console_output
            elif run_mode == "3":
                # Continuous mode: retries forever by design; the handler
                # only ends when the Gradio request is cancelled.
                while True:
                    success, error, generated_function = generate_and_test_function(initial_prompt)
                    if success:
                        generated_function = handle_post_success_actions(generated_function)
                        initial_prompt = f"Continue developing the function:\n{generated_function}"
                    else:
                        console_output += "Function test failed. Retrying...\n"
                        yield console_output
            # BUGFIX: command_input was cleared above, so a second pass of the
            # outer loop saw choice == "" and spun forever in the invalid-choice
            # branch, growing console_output without bound. Finish after one
            # handled command instead.
            break
        elif choice == "2":
            console_output += "Exiting... Goodbye! π\n"
            yield console_output
            break
        else:
            # Original literal here was split by a raw newline (SyntaxError);
            # re-joined. Same endless-loop hazard as above: yield once, stop.
            console_output += "Invalid choice. Please try again. π\n"
            yield console_output
            break

    yield console_output
|
|
|
# Gradio UI: prompt + mode controls on the left, streaming console on the
# right. main() is a generator, so console_output updates incrementally.
with gr.Blocks() as demo:
    gr.Markdown("# LLM-Powered Function Generator")

    with gr.Row():
        with gr.Column():
            initial_prompt = gr.Textbox(label="Initial Prompt")
            run_mode = gr.Radio(["1", "2", "3"], label="Run Mode", info="1: Single Run, 2: Multiple Runs, 3: Continuous Mode")
            # Only meaningful for run mode "2"; revealed by the change handler below.
            num_runs = gr.Number(label="Number of Runs", visible=False, interactive=True)
            start_button = gr.Button("Start")

        with gr.Column():
            console_output = gr.Textbox(label="Console Output", lines=20)
            command_input = gr.Textbox(label="Command Input", lines=1)

    # Show the run-count field only when "Multiple Runs" is selected.
    run_mode.change(lambda x: gr.update(visible=x=="2"), inputs=run_mode, outputs=num_runs)

    # Both the Start button and pressing Enter in the command box invoke main()
    # with the same inputs; its yields stream into console_output.
    start_button.click(main, inputs=[initial_prompt, run_mode, num_runs, console_output, command_input], outputs=console_output)
    command_input.submit(main, inputs=[initial_prompt, run_mode, num_runs, console_output, command_input], outputs=console_output)

# queue() enables generator/streaming handlers before launching the server.
demo.queue().launch()
|
|