Spaces:
Build error
Build error
Upload folder using huggingface_hub
Browse files- .docker/Dockerfile +20 -0
- .gitignore +2 -0
- .gradio/certificate.pem +31 -0
- README.md +7 -12
- api/__init__py +0 -0
- api/initiate_pipeline_call.py +34 -0
- components/__init__.py +0 -0
- components/bug_finding.py +8 -0
- components/chat.py +13 -0
- components/file_upload.py +10 -0
- components/front_page.py +66 -0
- components/interface.py +51 -0
- components/model_selection.py +4 -0
- components/patch_generation.py +8 -0
- components/patch_geration.py +0 -0
- components/patch_validation.py +8 -0
- components/pattern_matching.py +8 -0
- docker-compose.yml +14 -0
- logo.png +0 -0
- main.py +10 -0
- requirements.txt +75 -0
- ui/FrontPage.py +99 -0
- ui/__init__.py +0 -0
- utils/convert_steps.py +4 -0
- utils/interface_utils.py +105 -0
- utils/process_steps.py +3 -0
- utils/storage.py +11 -0
.docker/Dockerfile
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Use an official Python image
FROM python:3.13

# Set the working directory
WORKDIR /app

# Copy only requirements first (for better caching)
COPY requirements.txt .

# Install dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application code
COPY . .

# Expose the port Gradio runs on
EXPOSE 7860

# Command to run the Gradio app.
# Fix: the repo's entry point is main.py (see README app_file); there is
# no top-level interface.py — the old CMD would fail at container start.
CMD ["python", "main.py"]
.gitignore
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__
|
| 2 |
+
models
|
.gradio/certificate.pem
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-----BEGIN CERTIFICATE-----
|
| 2 |
+
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
|
| 3 |
+
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
|
| 4 |
+
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
|
| 5 |
+
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
|
| 6 |
+
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
|
| 7 |
+
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
|
| 8 |
+
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
|
| 9 |
+
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
|
| 10 |
+
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
|
| 11 |
+
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
|
| 12 |
+
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
|
| 13 |
+
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
|
| 14 |
+
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
|
| 15 |
+
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
|
| 16 |
+
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
|
| 17 |
+
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
|
| 18 |
+
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
|
| 19 |
+
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
|
| 20 |
+
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
|
| 21 |
+
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
|
| 22 |
+
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
|
| 23 |
+
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
|
| 24 |
+
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
|
| 25 |
+
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
|
| 26 |
+
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
|
| 27 |
+
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
|
| 28 |
+
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
|
| 29 |
+
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
|
| 30 |
+
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
|
| 31 |
+
-----END CERTIFICATE-----
|
README.md
CHANGED
|
@@ -1,12 +1,7 @@
|
|
| 1 |
-
---
|
| 2 |
-
title: Code
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
app_file: app.py
|
| 9 |
-
pinned: false
|
| 10 |
-
---
|
| 11 |
-
|
| 12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Code-Repair-w-LLMs
|
| 3 |
+
app_file: main.py
|
| 4 |
+
sdk: gradio
|
| 5 |
+
sdk_version: 5.6.0
|
| 6 |
+
---
|
| 7 |
+
frontend for code repair w llms
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
api/__init__py
ADDED
|
File without changes
|
api/initiate_pipeline_call.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import requests
import os
import mimetypes

# Backend endpoint that kicks off the repair pipeline.
API_URL = "http://localhost:8000/api/initiate_pipeline"


def initiate_pipeline_call(files, pipeline_steps):
    """Upload the given files to the backend and start the pipeline.

    Args:
        files: Iterable of Gradio file objects; each has a ``.name``
            attribute holding the path of a local temp file.
        pipeline_steps: Integer bitmask of the selected pipeline steps.

    Returns:
        The decoded JSON response on success, or an ``"Error: ..."``
        string when the request fails.
    """
    file_data = []

    for file in files:
        mime_type = mimetypes.guess_type(file.name)[0] or "application/octet-stream"

        # Read the on-disk temp file so the actual content is uploaded.
        # Fix: send only the basename — the original sent the full local
        # temp path as the remote filename.
        with open(file.name, "rb") as f:
            file_data.append(
                ("files", (os.path.basename(file.name), f.read(), mime_type))
            )

    try:
        response = requests.post(
            API_URL,
            params={"pipeline_steps": pipeline_steps},  # let requests encode the query
            files=file_data,
            timeout=60,  # fix: don't hang the UI forever if the backend is down
        )
        response.raise_for_status()  # surface 4xx/5xx as exceptions
        return response.json()
    except requests.exceptions.RequestException as e:
        return f"Error: {str(e)}"
components/__init__.py
ADDED
|
File without changes
|
components/bug_finding.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr
from components.model_selection import create_model_selection_dropdown


def create_bug_finding_tab(choices):
    """Build the Bug Finding tab: model dropdown, status box, code viewer.

    Args:
        choices: Model names offered in the selection dropdown.

    Returns:
        Tuple of (model_dropdown, output_textbox, code_view) so callers
        can attach event handlers — the original discarded them, making
        the tab impossible to wire up.
    """
    with gr.Column():
        model_dropdown = create_model_selection_dropdown(choices)
        output_box = gr.Textbox(label="Output", interactive=False)
        code_view = gr.Code(
            label="File Contents",
            language="python",
            interactive=False,
            elem_classes=["fixed-height"],
        )
    return model_dropdown, output_box, code_view
components/chat.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr


def create_chat_controls():
    """Create the chat prompt textbox and its submit button.

    Returns:
        Tuple of (prompt_textbox, submit_button).
    """
    # NOTE(review): exact Row/Column nesting reconstructed from a diff
    # rendering without indentation — confirm layout against the live UI.
    with gr.Row():
        with gr.Column(scale=10):
            prompt_box = gr.Textbox(label="Prompt", placeholder="Enter prompt")

    with gr.Row():
        with gr.Column(min_width=0, scale=10):
            submit = gr.Button("Submit Prompt")

    return prompt_box, submit
components/file_upload.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr


def create_file_upload_section():
    """Build the upload row: a file-input widget beside a read-only
    code viewer that shows the uploaded file's contents.

    Returns:
        Tuple of (file_input, file_content).
    """
    # equal_height keeps the upload box and the viewer aligned.
    with gr.Row(equal_height=True):
        with gr.Column(scale=1):
            file_input = gr.File(
                label="Upload Codebase/Single File",
                file_types=[".py", ".java", ".c", ".cpp"],
            )
        with gr.Column(scale=3):
            file_content = gr.Code(
                label="File Contents",
                language="python",
                interactive=False,
                elem_classes=["fixed-height"],
            )
    return file_input, file_content
components/front_page.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr
import utils.interface_utils as iutils

from api.initiate_pipeline_call import initiate_pipeline_call
from utils.convert_steps import convert_steps
from utils.storage import save_jwt_to_session
from components.interface import create_interface
from components.model_selection import create_model_selection_dropdown


def handle_initiate_pipeline(files, selected_steps, initial_prompt):
    """Validate the upload, kick off the pipeline, and swap pages.

    Args:
        files: Files uploaded on the front page.
        selected_steps: Step names chosen in the checkbox group.
        initial_prompt: Optional user prompt (currently unused — TODO).

    Returns:
        Visibility updates for (front_page, main_interface).
    """
    if not files:
        gr.Warning("Please upload a file/codebase.")
        # Stay on the front page until the user uploads something.
        return gr.update(visible=True), gr.update(visible=False)

    binary_steps = convert_steps(selected_steps)

    # TODO: Do something with initial_prompt

    response = initiate_pipeline_call(files, binary_steps)
    # Fix: removed the debug print(response) that the original code's own
    # comment flagged for deletion before production.
    save_jwt_to_session(response)

    # Hide the front page and reveal the main interface.
    return gr.update(visible=False), gr.update(visible=True)
def create_full_ui():
    """Assemble the app: a visible front page plus a hidden main
    interface that is revealed once the pipeline has been initiated."""
    with gr.Blocks(theme=iutils.custom_theme(), css="style.css") as app:
        # TODO: Replace with actual choices
        choices = ["ChatGPT", "Claude"]

        # --- Front page (shown first) ---
        with gr.Column(visible=True) as front_page:
            gr.Markdown("# Welcome to Code Repair with LLMs!")
            gr.Markdown("## Upload your files/codebase. All steps of the pipeline will be executed on the initial run.")

            file_input = gr.Files(
                label="Upload Multiple Files / Codebase",
                file_types=[".py", ".java", ".c", ".cpp"],
            )
            gr.Markdown("Python, Java, C++, and .zip files accepted.")

            steps = ["Bug Finding", "Pattern Matching", "Patch Generation", "Patch Validation"]
            # All steps are forced on for the initial run, hence non-interactive.
            checkboxes = gr.CheckboxGroup(steps, label="Select Desired Steps", value=steps, interactive=False)

            create_model_selection_dropdown(choices=choices)

            msg = gr.Textbox(label="Prompt", placeholder="Enter prompt (Optional)")
            initiate_button = gr.Button("Initiate Pipeline")

        # --- Main interface (hidden until the pipeline starts) ---
        with gr.Row(visible=False) as main_interface:
            interface = create_interface()

        # Wire the click inside the Blocks context so the event registers.
        initiate_button.click(
            fn=handle_initiate_pipeline,
            inputs=[file_input, checkboxes, msg],
            outputs=[front_page, main_interface],
        )

    return app
components/interface.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr
import os
import utils.interface_utils as iutils
from components.chat import create_chat_controls
from components.bug_finding import create_bug_finding_tab
from components.pattern_matching import create_pattern_matching_tab
from components.patch_generation import create_patch_generation_tab
from components.patch_validation import create_patch_validation_tab
from components.file_upload import create_file_upload_section
from components.model_selection import create_model_selection_dropdown


def create_interface():
    """Build the main tabbed interface: a Chat tab plus one tab per
    pipeline stage."""
    # TODO: Replace with actual choices
    choices = ["ChatGPT", "Claude"]

    with gr.Blocks(theme=iutils.custom_theme(), css=iutils.custom_css()) as interface:
        with gr.Column():
            # Header: title on the left, logo pushed to the right.
            with gr.Row():
                gr.Markdown("# Code Repair with LLMs")
                gr.Markdown("")  # filler to push logo to the right
                gr.Image(
                    "logo.png",
                    label="Logo",
                    show_download_button=False,
                    show_fullscreen_button=False,
                    show_label=False,
                    height=250,
                    width=450,
                    container=False,
                )

            # One tab per feature.
            with gr.Tab("Chat"):
                with gr.Column():
                    steps = ["Bug Finding", "Pattern Matching", "Patch Generation", "Patch Validation"]
                    checkboxes = gr.CheckboxGroup(steps, label="Select Desired Steps", value=steps, interactive=True)
                    create_model_selection_dropdown(choices)
                with gr.Column():
                    chatbot = gr.Chatbot(value=None, type="messages", show_label=True, show_share_button=False)
                    msg, submit_button = create_chat_controls()

            with gr.Tab("Bug Finding"):
                create_bug_finding_tab(choices)

            with gr.Tab("Pattern Matching"):
                create_pattern_matching_tab(choices)

            with gr.Tab("Patch Generation"):
                create_patch_generation_tab(choices)

            with gr.Tab("Patch Validation"):
                create_patch_validation_tab(choices)

        # TODO: need new function to handle future pipeline calls
        # msg.submit(fn=handle_initiate_pipeline, inputs=[files, checkboxes, msg], outputs=[chatbot])  # clone for submit_button

    return interface
components/model_selection.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr


def create_model_selection_dropdown(choices):
    """Return an interactive "Model Selection" dropdown over *choices*."""
    dropdown = gr.Dropdown(
        label="Model Selection",
        choices=choices,
        interactive=True,
    )
    return dropdown
components/patch_generation.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr
from components.model_selection import create_model_selection_dropdown


def create_patch_generation_tab(choices):
    """Build the Patch Generation tab: model dropdown, status box, code viewer.

    Args:
        choices: Model names offered in the selection dropdown.

    Returns:
        Tuple of (model_dropdown, output_textbox, code_view) so callers
        can attach event handlers — the original discarded them, making
        the tab impossible to wire up.
    """
    with gr.Column():
        model_dropdown = create_model_selection_dropdown(choices)
        output_box = gr.Textbox(label="Output", interactive=False)
        code_view = gr.Code(
            label="File Contents",
            language="python",
            interactive=False,
            elem_classes=["fixed-height"],
        )
    return model_dropdown, output_box, code_view
components/patch_geration.py
ADDED
|
File without changes
|
components/patch_validation.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr
from components.model_selection import create_model_selection_dropdown


def create_patch_validation_tab(choices):
    """Build the Patch Validation tab: model dropdown, status box, code viewer.

    Args:
        choices: Model names offered in the selection dropdown.

    Returns:
        Tuple of (model_dropdown, output_textbox, code_view) so callers
        can attach event handlers — the original discarded them, making
        the tab impossible to wire up.
    """
    with gr.Column():
        model_dropdown = create_model_selection_dropdown(choices)
        output_box = gr.Textbox(label="Output", interactive=False)
        code_view = gr.Code(
            label="File Contents",
            language="python",
            interactive=False,
            elem_classes=["fixed-height"],
        )
    return model_dropdown, output_box, code_view
components/pattern_matching.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr
from components.model_selection import create_model_selection_dropdown


def create_pattern_matching_tab(choices):
    """Build the Pattern Matching tab: model dropdown, status box, code viewer.

    Args:
        choices: Model names offered in the selection dropdown.

    Returns:
        Tuple of (model_dropdown, output_textbox, code_view) so callers
        can attach event handlers — the original discarded them, making
        the tab impossible to wire up.
    """
    with gr.Column():
        model_dropdown = create_model_selection_dropdown(choices)
        output_box = gr.Textbox(label="Output", interactive=False)
        code_view = gr.Code(
            label="File Contents",
            language="python",
            interactive=False,
            elem_classes=["fixed-height"],
        )
    return model_dropdown, output_box, code_view
docker-compose.yml
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version: "3.8"
|
| 2 |
+
|
| 3 |
+
services:
|
| 4 |
+
gradio_app:
|
| 5 |
+
build:
|
| 6 |
+
context: . # Use the root as context
|
| 7 |
+
dockerfile: .docker/Dockerfile # Specify the Dockerfile inside .docker
|
| 8 |
+
ports:
|
| 9 |
+
- "7860:7860"
|
| 10 |
+
volumes:
|
| 11 |
+
- .:/app
|
| 12 |
+
environment:
|
| 13 |
+
- PYTHONUNBUFFERED=1
|
| 14 |
+
restart: unless-stopped
|
logo.png
ADDED
|
main.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr
from components.front_page import create_full_ui

# Build the full UI at import time so the Space can serve it directly.
app = create_full_ui()

if __name__ == "__main__":
    # share=True publishes a temporary public Gradio link.
    app.launch(share=True, show_api=False)
requirements.txt
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
aiofiles==23.2.1
|
| 2 |
+
annotated-types==0.7.0
|
| 3 |
+
anyio==4.8.0
|
| 4 |
+
audioop-lts==0.2.1
|
| 5 |
+
certifi==2025.1.31
|
| 6 |
+
cffi==1.17.1
|
| 7 |
+
charset-normalizer==3.4.1
|
| 8 |
+
click==8.1.8
|
| 9 |
+
colorama==0.4.6
|
| 10 |
+
contourpy==1.3.1
|
| 11 |
+
cycler==0.12.1
|
| 12 |
+
fastapi==0.115.8
|
| 13 |
+
ffmpy==0.5.0
|
| 14 |
+
filelock==3.17.0
|
| 15 |
+
fonttools==4.56.0
|
| 16 |
+
fsspec==2025.2.0
|
| 17 |
+
gevent==24.11.1
|
| 18 |
+
gradio==5.15.0
|
| 19 |
+
gradio_client==1.7.0
|
| 20 |
+
greenlet==3.1.1
|
| 21 |
+
h11==0.14.0
|
| 22 |
+
httpcore==1.0.7
|
| 23 |
+
httpx==0.28.1
|
| 24 |
+
huggingface-hub==0.28.1
|
| 25 |
+
idna==3.10
|
| 26 |
+
Jinja2==3.1.5
|
| 27 |
+
joblib==1.4.2
|
| 28 |
+
kiwisolver==1.4.8
|
| 29 |
+
markdown-it-py==3.0.0
|
| 30 |
+
MarkupSafe==2.1.5
|
| 31 |
+
matplotlib==3.10.0
|
| 32 |
+
mdurl==0.1.2
|
| 33 |
+
mplfinance==0.12.10b0
|
| 34 |
+
numpy==2.2.2
|
| 35 |
+
nvdlib==0.7.9
|
| 36 |
+
orjson==3.10.15
|
| 37 |
+
packaging==24.2
|
| 38 |
+
pandas==2.2.3
|
| 39 |
+
pillow==11.1.0
|
| 40 |
+
pycparser==2.22
|
| 41 |
+
pydantic==2.10.6
|
| 42 |
+
pydantic_core==2.27.2
|
| 43 |
+
pydub==0.25.1
|
| 44 |
+
Pygments==2.19.1
|
| 45 |
+
pyparsing==3.2.1
|
| 46 |
+
python-dateutil==2.9.0.post0
|
| 47 |
+
python-multipart==0.0.20
|
| 48 |
+
pytz==2025.1
|
| 49 |
+
PyYAML==6.0.2
|
| 50 |
+
requests==2.32.3
|
| 51 |
+
rich==13.9.4
|
| 52 |
+
ruff==0.9.6
|
| 53 |
+
safehttpx==0.1.6
|
| 54 |
+
scikit-learn==1.6.1
|
| 55 |
+
scipy==1.15.1
|
| 56 |
+
seaborn==0.13.2
|
| 57 |
+
semantic-version==2.10.0
|
| 58 |
+
setuptools==75.8.0
|
| 59 |
+
shellingham==1.5.4
|
| 60 |
+
six==1.17.0
|
| 61 |
+
sniffio==1.3.1
|
| 62 |
+
starlette==0.45.3
|
| 63 |
+
threadpoolctl==3.5.0
|
| 64 |
+
tomlkit==0.13.2
|
| 65 |
+
tqdm==4.67.1
|
| 66 |
+
typer==0.15.1
|
| 67 |
+
typing_extensions==4.12.2
|
| 68 |
+
tzdata==2025.1
|
| 69 |
+
urllib3==2.3.0
|
| 70 |
+
uvicorn==0.34.0
|
| 71 |
+
websocket-client==1.8.0
|
| 72 |
+
websockets==14.2
|
| 73 |
+
zope.event==5.0
|
| 74 |
+
zope.interface==7.2
|
| 75 |
+
# websocket-client (duplicate removed — already pinned to 1.8.0 above)
|
ui/FrontPage.py
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio
import os
import sys

# Make repo-root imports resolvable when this module is run directly.
sys.path.insert(0, os.path.abspath("./"))

# NOTE(review): the repo tree shows an `api/` package, not `source/api_calls`
# — this import path looks stale; confirm before relying on this module.
from source.api_calls.initiate_pipeline_call import initiate_pipeline_call
from utils.process_steps import process_steps

# Default pipeline-steps value sent to the backend
# (meaning of the integer is defined by the API).
pipeline_steps = 21
class FrontPage:
    """Legacy single-page UI (the app now uses components/front_page.py)."""

    def __init__(self):
        """Build the Gradio page and wire its events."""
        with gradio.Blocks(css=self.custom_css()) as self.page:
            gradio.Markdown("# Code Repair with LLMs")
            gradio.Markdown("Upload a Python, Java, C, or C++ file for processing")

            with gradio.Row():
                file_input = gradio.File(label="Upload Code File", file_types=[".py", ".java", ".c", ".cpp"])
                language_input = gradio.Dropdown(choices=["Python", "Java", "C", "C++"], label="Language")

            file_content = gradio.Code(label="File Contents", language="python", interactive=False, elem_classes=["fixed-height"])
            process_button = gradio.Button("Process")

            # One status box per pipeline stage.
            with gradio.Row():
                stage1 = gradio.Textbox(label="Fault Localization", value="Pending", interactive=False, elem_classes=["stage-box"])
                stage2 = gradio.Textbox(label="Pattern Matching", value="Pending", interactive=False, elem_classes=["stage-box"])
                stage3 = gradio.Textbox(label="Patch Generation", value="Pending", interactive=False, elem_classes=["stage-box"])
                stage4 = gradio.Textbox(label="Patch Validation", value="Pending", interactive=False, elem_classes=["stage-box"])

            output = gradio.Code(label="Processed Output", language="python", elem_classes=["fixed-height"])

            pipeline_steps_input = gradio.Number(value=pipeline_steps, label="Pipeline Steps")

            file_input.change(fn=self.display_file_content, inputs=[file_input], outputs=[file_content])
            process_button.click(fn=self.initiate_pipeline,
                                 inputs=[file_input, language_input, pipeline_steps_input], outputs=[])

    def custom_css(self):
        """CSS used by the page: fixed-height code boxes and bold,
        centered stage indicators."""
        return """
        .fixed-height {
            height: 300px !important;
            overflow-y: auto !important;
        }
        .stage-box {
            text-align: center !important;
            font-weight: bold !important;
        }
        """

    def display_file_content(self, file):
        """Return the text contents of *file* for display.

        Accepts a path string, a Gradio file object (``.name`` holds a
        temp-file path), or a readable binary stream. Returns an error
        message string on failure instead of raising.
        """
        if file is None:
            return "No file uploaded yet."
        try:
            if isinstance(file, str):
                with open(file, 'r') as f:
                    return f.read()
            elif hasattr(file, 'name'):
                # Fix: read the temp file at .name — the original returned
                # the path itself, so the viewer showed a filename instead
                # of the file's contents.
                with open(file.name, 'r') as f:
                    return f.read()
            elif hasattr(file, 'read'):
                return file.read().decode('utf-8')
            else:
                return str(file)
        except Exception as e:
            return f"An error occurred while reading the file: {str(e)}"

    def initiate_pipeline(self, file_input, language_input, pipeline_steps):
        """Run the stub stages locally, then call the backend pipeline."""
        if file_input is None or (isinstance(file_input, list) and len(file_input) == 0):
            return "No file uploaded."

        self.process_file(file_input, language_input)

        # If file_input is a list, take the first file.
        if isinstance(file_input, list):
            file_input = file_input[0]

        print(f"Processing file: {file_input}")
        return initiate_pipeline_call(file_input, pipeline_steps)

    def process_file(self, file, language):
        """Run the (stub) four-stage pipeline over *file*.

        Returns:
            Five values: one status string per stage plus the processed
            output (or an error message).
        """
        if file is None:
            return "Skipped", "Skipped", "Skipped", "Skipped", "Please upload a file."

        try:
            content = self.display_file_content(file)
            # All stages are stubbed as completed for now.
            stage1_result = "Complete"
            stage2_result = "Complete"
            stage3_result = "Complete"
            stage4_result = "Complete"
            processed_content = self.process_content(content, language)
            return stage1_result, stage2_result, stage3_result, stage4_result, processed_content
        except Exception as e:
            return "Error", "Error", "Error", "Error", f"An error occurred: {str(e)}"

    def process_content(self, content, language):
        # This is where you would implement your actual processing logic.
        # For now, we'll just return the file content with a message.
        return f"Processing {language} code:\n\n{content}\n\nProcessing complete."
ui/__init__.py
ADDED
|
File without changes
|
utils/convert_steps.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def convert_steps(selected_steps):
    """Convert the selected step names into a bitmask integer.

    Bit i is set when the i-th pipeline step appears in *selected_steps*:
    Bug Finding=1, Pattern Matching=2, Patch Generation=4,
    Patch Validation=8.
    """
    step_order = ["Bug Finding", "Pattern Matching", "Patch Generation", "Patch Validation"]
    mask = 0
    for bit, name in enumerate(step_order):
        if name in selected_steps:
            mask |= 1 << bit
    return mask
utils/interface_utils.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# event handlers

import gradio as gr


def custom_theme():
    """Dark Ocean-based Gradio theme with a gold primary palette."""
    gold = gr.themes.Color(
        c50="#fefce8", c100="#fef9c3", c200="#fef08a", c300="#fde047",
        c400="#facc15", c500="#eab308", c600="#ca8a04", c700="#a16207",
        c800="#854d0e", c900="#713f12", c950="#BA9B37",
    )
    base = gr.themes.Ocean(
        primary_hue=gold,
        secondary_hue="zinc",
        radius_size="lg",
    )
    # Dark neutral backgrounds with gold gradient accents on buttons
    # and selected checkboxes.
    return base.set(
        background_fill_primary='*neutral_900',
        background_fill_secondary='*neutral_700',
        body_background_fill='*secondary_900',
        body_text_color='*neutral_100',
        body_text_color_subdued='*neutral_400',
        border_color_accent='*neutral_900',
        button_secondary_background_fill='linear-gradient(120deg, *secondary_900 0%, *primary_400 50%, *primary_700 100%)',
        button_secondary_background_fill_hover='linear-gradient(120deg, *secondary_900 0%, *primary_400 50%, *primary_700 100%)',
        checkbox_label_background_fill_selected="linear-gradient(120deg, *secondary_900 0%, *primary_400 50%, *primary_700 100%)",
        code_background_fill='*neutral_950',
        color_accent_soft='*primary_950',
        input_background_fill='*neutral_700',
        table_odd_background_fill='*neutral_700',
    )
def custom_css():
    """CSS applied to the interface: fixed-height code boxes, centered
    stage indicators, and large title/checkbox/button variants."""
    return """
    .fixed-height {
        height: 300px !important;
        overflow-y: auto !important;
    }

    .stage-box {
        text-align: center !important;
        font-weight: bold !important;
    }

    .title {
        text-align: center;
        font-size: 2rem;
        font-weight: bold;
    }

    .subtitle {
        text-align: center;
        font-size: 1.2rem;
    }

    .big-checkboxes label {
        font-size: 1.5rem;
        display: block;
        text-align: center;
    }

    .big-button {
        font-size: 1.5rem;
        padding: 10px 20px;
        display: block;
        margin: 0 auto;
    }
    """
def display_file_content(file):
    """Return the text contents of *file* for display in the UI.

    Accepts a path string, a Gradio file object (``.name`` holds a
    temp-file path), or a readable binary stream. Returns an error
    message string on failure instead of raising.
    """
    if file is None:
        return "No file uploaded yet."
    try:
        if isinstance(file, str):
            with open(file, 'r') as f:
                return f.read()
        elif hasattr(file, 'name'):
            # Fix: read the temp file at .name — the original returned the
            # path itself, so the viewer showed a filename, not contents.
            with open(file.name, 'r') as f:
                return f.read()
        elif hasattr(file, 'read'):
            return file.read().decode('utf-8')
        else:
            return str(file)
    except Exception as e:
        return f"An error occurred while reading the file: {str(e)}"
def process_file(file, language):
    """Run the (stub) four-stage pipeline over an uploaded file.

    Returns:
        Five values: one status string per stage plus the processed
        output (or an error message when *file* is missing or unreadable).
    """
    if file is None:
        return "Skipped", "Skipped", "Skipped", "Skipped", "Please upload a file."

    try:
        content = display_file_content(file)
        # All four stages are stubbed as completed for now.
        statuses = ("Complete", "Complete", "Complete", "Complete")
        return (*statuses, process_content(content, language))
    except Exception as e:
        return "Error", "Error", "Error", "Error", f"An error occurred: {str(e)}"
def process_content(content, language):
    """Placeholder for the real processing logic: echo the input back
    wrapped in a status message."""
    header = f"Processing {language} code:"
    footer = "Processing complete."
    return f"{header}\n\n{content}\n\n{footer}"
utils/process_steps.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
def process_steps():
    """Placeholder for step processing; not implemented yet (returns None)."""
    return None
utils/storage.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr


def save_jwt_to_session(response):
    """Pull session_token / session_id out of the API response and emit
    Gradio state updates carrying them.

    Args:
        response: Decoded JSON dict from the initiate-pipeline call.

    Returns:
        A pair of gr.update objects; empty updates when either value is
        missing from the response.
    """
    token = response.get("session_token")
    session = response.get("session_id")

    if not (token and session):
        # Nothing usable in the response — leave state untouched.
        return gr.update(), gr.update()

    return gr.update(value=token), gr.update(value=session)