Update app.py
app.py (CHANGED)
Before:

@@ -1,29 +1,17 @@
 import gradio as gr
-from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM

-# ...
-MODELS = {
-    "CodeT5-small-sum": ...,
-    "CodeT5-base-sum": "Salesforce/codet5-base-multi-sum"
-}

-def load_model(model_name):
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-    # Use T5 for Seq2Seq, fallback to text2text-generation for others
-    if "codet5" in model_name.lower():
-        model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
-        return pipeline("summarization", model=model, tokenizer=tokenizer)
-    else:
-        # fallback for unsupported models
-        return pipeline("text2text-generation", model=model_name, tokenizer=tokenizer)
-
-def explain_code(files, model_choice):
-    model_id = MODELS[model_choice]
-    explainer = load_model(model_id)
     explanations = []
     for file in files:
         filename = file.name if hasattr(file, 'name') else 'Uploaded File'
-        # For Gradio 'binary' type, file is bytes, not a file-like object
         if isinstance(file, bytes):
             code = file.decode('utf-8')
         elif hasattr(file, 'read'):
@@ -34,27 +22,21 @@ def explain_code(files, model_choice):
         line_explanations = []
         for idx, line in enumerate(lines):
             if not line.strip():
-                continue
-
-            ...
-            gen_kwargs = ...
-            if "codet5" in model_id.lower():
-                result = explainer(line, **gen_kwargs)[0].get('summary_text', '')
-            else:
-                prompt = f"Explain this line of code:\n{line}"
-                result = explainer(prompt, **gen_kwargs)[0].get('generated_text', '')
             line_explanations.append(f"Line {idx+1}: {line}\nExplanation: {result.strip()}\n")
         explanations.append(f"## {filename}\n" + "\n".join(line_explanations))
     return "\n\n".join(explanations)

 def main():
     with gr.Blocks() as demo:
-        gr.Markdown("# CodeExplainerBot (
-        model_choice = gr.Dropdown(list(MODELS.keys()), label="Select a model", value="CodeT5-small-sum")
         code_input = gr.File(label="Upload code files", file_count="multiple", type="binary")
         output = gr.Textbox(label="Line-by-line Explanation", lines=20)
         explain_btn = gr.Button("Explain Code")
-        explain_btn.click(fn=explain_code, inputs=[code_input, model_choice], outputs=output)
         demo.launch()

 if __name__ == "__main__":
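For reference, a minimal standalone sketch (not part of this commit) of the CodeT5 summarization path that the change removes, using the Salesforce/codet5-base-multi-sum checkpoint named in the old MODELS dict; the generation settings below are illustrative, since the original gen_kwargs are not visible in the diff:

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline

# Checkpoint taken from the removed MODELS dict.
model_id = "Salesforce/codet5-base-multi-sum"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
explainer = pipeline("summarization", model=model, tokenizer=tokenizer)

# max_length is an assumed value, not the app's original setting.
line = "return '\\n\\n'.join(explanations)"
print(explainer(line, max_length=48)[0]["summary_text"])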
After:

 import gradio as gr
+import google.generativeai as genai
+import os
+from huggingface_hub import login

+# Use your Hugging Face variable for the Gemini API key
+GEMINI_API_KEY = os.getenv("GOOGLEAPI", "PASTE_YOUR_GEMINI_API_KEY_HERE")
+genai.configure(api_key=GEMINI_API_KEY)

+def explain_code(files, _):
+    model = genai.GenerativeModel("gemini-pro")
     explanations = []
     for file in files:
         filename = file.name if hasattr(file, 'name') else 'Uploaded File'
         if isinstance(file, bytes):
             code = file.decode('utf-8')
         elif hasattr(file, 'read'):

...

         line_explanations = []
         for idx, line in enumerate(lines):
             if not line.strip():
+                continue
+            prompt = f"Explain this line of code:\n{line}"
+            response = model.generate_content(prompt)
+            result = response.text if hasattr(response, 'text') else str(response)
             line_explanations.append(f"Line {idx+1}: {line}\nExplanation: {result.strip()}\n")
         explanations.append(f"## {filename}\n" + "\n".join(line_explanations))
     return "\n\n".join(explanations)

 def main():
     with gr.Blocks() as demo:
+        gr.Markdown("# CodeExplainerBot (Gemini API)")
         code_input = gr.File(label="Upload code files", file_count="multiple", type="binary")
         output = gr.Textbox(label="Line-by-line Explanation", lines=20)
         explain_btn = gr.Button("Explain Code")
+        explain_btn.click(fn=explain_code, inputs=[code_input, None], outputs=output)
         demo.launch()

 if __name__ == "__main__":
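Two details in the updated wiring may be worth a second look: from huggingface_hub import login is never called in the lines shown, and explain_btn.click(..., inputs=[code_input, None], ...) passes a literal None where Gradio expects a component (the second parameter of explain_code is ignored anyway). Below is a minimal sketch of a single-input wiring under those assumptions; it simplifies the prompt to one Gemini call per file and is a suggestion, not part of the commit:

import os

import google.generativeai as genai
import gradio as gr

# GOOGLEAPI is the Space secret already referenced in the updated app.py.
genai.configure(api_key=os.getenv("GOOGLEAPI", ""))

def explain_code(files):
    # The Gemini model is fixed, so a single parameter (the uploaded files) suffices.
    model = genai.GenerativeModel("gemini-pro")
    explanations = []
    for file in files:
        # With gr.File(type="binary", file_count="multiple"), each item arrives as bytes.
        code = file.decode("utf-8") if isinstance(file, bytes) else file.read().decode("utf-8")
        # Simplified: one request per file instead of one per line.
        response = model.generate_content(f"Explain this code:\n{code}")
        explanations.append(response.text if hasattr(response, "text") else str(response))
    return "\n\n".join(explanations)

with gr.Blocks() as demo:
    gr.Markdown("# CodeExplainerBot (Gemini API)")
    code_input = gr.File(label="Upload code files", file_count="multiple", type="binary")
    output = gr.Textbox(label="Explanation", lines=20)
    explain_btn = gr.Button("Explain Code")
    # A single component can be passed directly; no placeholder None is needed.
    explain_btn.click(fn=explain_code, inputs=code_input, outputs=output)

demo.launch()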